bitkeeper revision 1.1085 (40f5624559Jpit7H8wc4PzDqjO-E2g)
author cl349@freefall.cl.cam.ac.uk <cl349@freefall.cl.cam.ac.uk>
Wed, 14 Jul 2004 16:41:41 +0000 (16:41 +0000)
committer cl349@freefall.cl.cam.ac.uk <cl349@freefall.cl.cam.ac.uk>
Wed, 14 Jul 2004 16:41:41 +0000 (16:41 +0000)
Initial checkin of Linux 2.6.

98 files changed:
.rootkeys
BitKeeper/etc/ignore
BitKeeper/etc/logging_ok
linux-2.6.7-xen-sparse/arch/xen/Kconfig [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/Kconfig.drivers [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/boot/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/defconfig [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/Kconfig [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/common.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/entry.S [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/evtchn.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/head.S [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/irq.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/ldt.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/process.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/setup.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/signal.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/sysenter.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/time.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/timers/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/traps.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vsyscall.S [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vsyscall.lds [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/kernel/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/kernel/empty.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/kernel/process.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/block/Kconfig [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/block/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/block/block.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/block/block.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/block/vbd.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/console/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/console/console.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/evtchn/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/evtchn/evtchn.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/net/Kconfig [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/net/Makefile [new file with mode: 0644]
linux-2.6.7-xen-sparse/drivers/xen/net/network.c [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/desc.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/e820.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/fixmap.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/hypervisor.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_resources.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/msr.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/page.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/param.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgalloc.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/processor.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/ptrace.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/segment.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/setup.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/system.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/timer.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/tlbflush.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/xor.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/blkif.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/domain_controller.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/evtchn.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/arch-x86_32.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/arch-x86_64.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/dom0_ops.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/event_channel.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/hypervisor-if.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/physdev.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/sched_ctl.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/trace.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/multicall.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/netif.h [new file with mode: 0644]
linux-2.6.7-xen-sparse/include/asm-xen/xen.h [new file with mode: 0644]

index f11d07c06a216802398760acecf6523c8ff9bc40..8e7e9a878530ef33b19a3dfdd8413c64d8407171 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3e5a4e681xMPdF9xCMwpyfuYMySU5g linux-2.4.26-xen-sparse/mm/mremap.c
 409ba2e7akOFqQUg6Qyg2s28xcXiMg linux-2.4.26-xen-sparse/mm/page_alloc.c
 3e5a4e683HKVU-sxtagrDasRB8eBVw linux-2.4.26-xen-sparse/mm/swapfile.c
+40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.7-xen-sparse/arch/xen/Kconfig
+40f56237utH41NPukqHksuNf29IC9A linux-2.6.7-xen-sparse/arch/xen/Kconfig.drivers
+40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.7-xen-sparse/arch/xen/Makefile
+40f56237JTc60m1FRlUxkUaGSQKrNw linux-2.6.7-xen-sparse/arch/xen/boot/Makefile
+40f56237wubfjJKlfIzZlI3ZM2VgGA linux-2.6.7-xen-sparse/arch/xen/defconfig
+40f56237Mta0yHNaMS_qtM2rge0qYA linux-2.6.7-xen-sparse/arch/xen/i386/Kconfig
+40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6.7-xen-sparse/arch/xen/i386/Makefile
+40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.7-xen-sparse/arch/xen/i386/kernel/Makefile
+40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
+40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/common.c
+40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.7-xen-sparse/arch/xen/i386/kernel/entry.S
+40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.7-xen-sparse/arch/xen/i386/kernel/evtchn.c
+40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.7-xen-sparse/arch/xen/i386/kernel/head.S
+40f562382aC3_Gt4RG-4ZsfvDRUg3Q linux-2.6.7-xen-sparse/arch/xen/i386/kernel/irq.c
+40f56238ue3YRsK52HG7iccNzP1AwQ linux-2.6.7-xen-sparse/arch/xen/i386/kernel/ldt.c
+40f56238a8iOVDEoostsbun_sy2i4g linux-2.6.7-xen-sparse/arch/xen/i386/kernel/process.c
+40f56238YQIJoYG2ehDGEcdTgLmGbg linux-2.6.7-xen-sparse/arch/xen/i386/kernel/setup.c
+40f56238nWMQg7CKbyTy0KJNvCzbtg linux-2.6.7-xen-sparse/arch/xen/i386/kernel/signal.c
+40f56238UL9uv78ODDzMwLL9yryeFw linux-2.6.7-xen-sparse/arch/xen/i386/kernel/sysenter.c
+40f56238qVGkpO_ycnQA8k03kQzAgA linux-2.6.7-xen-sparse/arch/xen/i386/kernel/time.c
+40f56238NzTgeO63RGoxHrW5NQeO3Q linux-2.6.7-xen-sparse/arch/xen/i386/kernel/timers/Makefile
+40f56238BMqG5PuSHufpjbvp_helBw linux-2.6.7-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
+40f562389xNa78YBZciUibQjyRU_Lg linux-2.6.7-xen-sparse/arch/xen/i386/kernel/traps.c
+40f56238qASEI_IOhCKWNuwFKNZrKQ linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S
+40f56238JypKAUG01ZojFwH7qnZ5uA linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vsyscall.S
+40f56238wi6AdNQjm0RT57bSkwb6hg linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
+40f56238a3w6-byOzexIlMgni76Lcg linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile
+40f56238ILx8xlbywNbzTdv5Zr4xXQ linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c
+40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c
+40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c
+40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c
+40f56239zOksGg_H4XD4ye6iZNtoZA linux-2.6.7-xen-sparse/arch/xen/kernel/Makefile
+40f56239bvOjuuuViZ0XMlNiREFC0A linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c
+40f56239pYRq5yshPTkv3ujXKc8K6g linux-2.6.7-xen-sparse/arch/xen/kernel/empty.c
+40f56239sFcjHiIRmnObRIDF-zaeKQ linux-2.6.7-xen-sparse/arch/xen/kernel/process.c
+40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c
+40f56239Dp_vMTgz8TEbvo1hjHGc3w linux-2.6.7-xen-sparse/drivers/xen/Makefile
+40f56239Sfle6wGv5FS0wjS_HI150A linux-2.6.7-xen-sparse/drivers/xen/block/Kconfig
+40f562395atl9x4suKGhPkjqLOXESg linux-2.6.7-xen-sparse/drivers/xen/block/Makefile
+40f56239-JNIaTzlviVJohVdoYOUpw linux-2.6.7-xen-sparse/drivers/xen/block/block.c
+40f56239y9naBTXe40Pi2J_z3p-d1g linux-2.6.7-xen-sparse/drivers/xen/block/block.h
+40f56239BVfPsXBiWQitXgDRtOsiqg linux-2.6.7-xen-sparse/drivers/xen/block/vbd.c
+40f56239fsLjvtD8YBRAWphps4FDjg linux-2.6.7-xen-sparse/drivers/xen/console/Makefile
+40f56239afE58Oot-9omHGqq4UJ--A linux-2.6.7-xen-sparse/drivers/xen/console/console.c
+40f56239KYxO0YabhPzCTeUuln-lnA linux-2.6.7-xen-sparse/drivers/xen/evtchn/Makefile
+40f56239DoibTX6R-ZYd3QTXAB8_TA linux-2.6.7-xen-sparse/drivers/xen/evtchn/evtchn.c
+40f56239lrg_Ob0BJ8WBFS1zeg2CYw linux-2.6.7-xen-sparse/drivers/xen/net/Kconfig
+40f56239Wd4k_ycG_mFsSO1r5xKdtQ linux-2.6.7-xen-sparse/drivers/xen/net/Makefile
+40f56239vUsbJCS9tGFfRVi1YZlGEg linux-2.6.7-xen-sparse/drivers/xen/net/network.c
+40f56239YAjS52QG2FIAQpHDZAdGHg linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/desc.h
+40f5623anSzpuEHgiNmQ56fIRfCoaQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/e820.h
+40f5623akIoBsQ3KxSB2kufkbgONXQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/fixmap.h
+40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/hypervisor.h
+40f5623aJVXQwpJMOLE99XgvGsfQ8Q linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h
+40f5623am9BzluYFuV6EQfTd-so3dA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h
+40f5623adZQ1IZGPxbDXONjyZGYuTA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h
+40f5623aKXkBBxgpLx2NcvkncQ1Yyw linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
+40f5623aMQZoYuf4ml9v69N3gu8ing linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h
+40f5623a8LroVMnZ5YRzJJmIc-zHlw linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h
+40f5623an3wOvFKmpIvqSxQfWzklVQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_resources.h
+40f5623ayR1vnzfF__htza35a8Ft-g linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h
+40f5623a4YdRdVzYWJzOOoqe8mnrXA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h
+40f5623aDLxmbOtUHvkWztKjAO4EjA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h
+40f5623aDMCsWOFO0jktZ4e8sjwvEg linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
+40f5623arsFXkGdPvIqvFi3yFXGR0Q linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
+40f5623aFTyFTR-vdiA-KaGxk5JOKQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/msr.h
+40f5623adgjZq9nAgCt0IXdWl7udSA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/page.h
+40f5623a54NuG-7qHihGYmw4wWQnMA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/param.h
+40f5623atCokYc2uCysSJ8jFO8TEsw linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
+40f5623aEToIXouJgO-ao5d5pcEt1w linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
+40f5623aCCXRPlGpNthVXstGz9ZV3A linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h
+40f5623aPCkQQfPtJSooGdhcatrvnQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/processor.h
+40f5623bvhcUmESJrtcII6Bmd61b3w linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/ptrace.h
+40f5623bzLvxr7WoJIxVf2OH4rCBJg linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/segment.h
+40f5623bG_LzgG6-qwk292nTc5Wabw linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/setup.h
+40f5623bgzm_9vwxpzJswlAxg298Gg linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
+40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/system.h
+40f5623bSgGrvrGRpD71K-lIYqaGgg linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/timer.h
+40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
+40f5623bxUbeGjkRrjDguCy_Gm8RLw linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/xor.h
+40f5623bqoi4GEoBiiUc6TZk1HjsMg linux-2.6.7-xen-sparse/include/asm-xen/blkif.h
+40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h
+40f5623bDU2mp4xcHrO0ThodQ9Vs7w linux-2.6.7-xen-sparse/include/asm-xen/domain_controller.h
+40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.7-xen-sparse/include/asm-xen/evtchn.h
+40f5623bSHoOzh_pYP9ovjpUz019Aw linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/arch-x86_32.h
+40f5623c1uIAB_OVr5AFdoOac7zxHA linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/arch-x86_64.h
+40f5623cEbvTM2QIJ8G6kJoYMLvFpw linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/dom0_ops.h
+40f5623cBiv7JBB2bwezpyMwyDGN_w linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/event_channel.h
+40f5623cvr4j1BQI1I82_K5wRocUKg linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/hypervisor-if.h
+40f5623cfHK4EBsPz922OqMVkZX6OA linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/physdev.h
+40f5623cFINsV23lGJNQNbhO5kus_Q linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/sched_ctl.h
+40f5623cb_pJrLt3h_nAzvJsjsV0rQ linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/trace.h
+40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.7-xen-sparse/include/asm-xen/multicall.h
+40f5623cTZ80EwjWUBlh44A9F9i_Lg linux-2.6.7-xen-sparse/include/asm-xen/netif.h
+40f5623cBiQhPHILVLrl3xa6bDBaRg linux-2.6.7-xen-sparse/include/asm-xen/xen.h
 40e1b09db5mN69Ijj0X_Eol-S7dXiw tools/Make.defs
 3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
 401d7e160vaxMBAUSLSicuZ7AQjJ3w tools/examples/Makefile
index 8f11ded149d86f128ae30138513e6fbac2f23bc8..60cc1f1a44142f0ff191cc34649b0c221c2ac92b 100644 (file)
--- a/BitKeeper/etc/ignore
+++ b/BitKeeper/etc/ignore
@@ -13,8 +13,8 @@ TAGS
 extras/mini-os/h/hypervisor-ifs
 install
 install/*
-linux-*-xen*/*
-linux-2.4.26-xen/*
+linux-*-xen0/*
+linux-*-xenU/*
 linux-xen-sparse
 patches/*
 tools/*/build/lib*/*.py
index 94cfc0620d04ddf02fb41e667dc5fd9f3a9d165f..34d574e882be97f5e5092ef5b30f13617d8f4dd4 100644 (file)
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -9,6 +9,7 @@ bd240@labyrinth.cl.cam.ac.uk
 br260@br260.wolfson.cam.ac.uk
 br260@labyrinth.cl.cam.ac.uk
 br260@laudney.cl.cam.ac.uk
+cl349@freefall.cl.cam.ac.uk
 djm@kirby.fc.hp.com
 gm281@boulderdash.cl.cam.ac.uk
 iap10@freefall.cl.cam.ac.uk
diff --git a/linux-2.6.7-xen-sparse/arch/xen/Kconfig b/linux-2.6.7-xen-sparse/arch/xen/Kconfig
new file mode 100644 (file)
index 0000000..659a577
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/arch/xen/Kconfig
@@ -0,0 +1,67 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+mainmenu "Linux Kernel Configuration"
+
+config XEN
+       bool
+       default y
+       help
+         This is the Linux Xen port.
+
+config ARCH_XEN
+       bool
+       default y
+
+
+source "init/Kconfig"
+
+#config VT
+#      bool
+#      default y
+
+#config VT_CONSOLE
+#      bool
+#      default y
+
+#config HW_CONSOLE
+#      bool
+#      default y
+
+choice
+       prompt "Processor Type"
+       default X86
+
+config X86
+       bool "X86"
+       help
+         Choose this option if your computer is an X86 architecture.
+
+config X86_64
+       bool "X86_64"
+       help
+         Choose this option if your computer is an X86_64 architecture.
+
+endchoice
+
+if X86
+source "arch/xen/i386/Kconfig"
+endif
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "arch/xen/Kconfig.drivers"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/linux-2.6.7-xen-sparse/arch/xen/Kconfig.drivers b/linux-2.6.7-xen-sparse/arch/xen/Kconfig.drivers
new file mode 100644 (file)
index 0000000..ee1eeb1
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/arch/xen/Kconfig.drivers
@@ -0,0 +1,71 @@
+# arch/xen/Kconfig.drivers
+
+menu "Device Drivers"
+
+source "drivers/base/Kconfig"
+
+#source "drivers/block/Kconfig"
+
+source "net/Kconfig"
+
+#source "drivers/input/Kconfig"
+
+config INPUT
+       tristate "Input devices (needed for keyboard, mouse, ...)" if EMBEDDED
+       default y
+       ---help---
+         Say Y here if you have any input device (mouse, keyboard, tablet,
+         joystick, steering wheel ...) connected to your system and want
+         it to be available to applications. This includes standard PS/2
+         keyboard and mouse.
+
+         Say N here if you have a headless (no monitor, no keyboard) system.
+
+         More information is available: <file:Documentation/input/input.txt>
+
+         If unsure, say Y.
+
+         To compile this driver as a module, choose M here: the
+         module will be called input.
+
+#source "drivers/char/Kconfig"
+
+config UNIX98_PTYS
+       bool "Unix98 PTY support" if EMBEDDED
+       default y
+       ---help---
+         A pseudo terminal (PTY) is a software device consisting of two
+         halves: a master and a slave. The slave device behaves identically to
+         a physical terminal; the master device is used by a process to
+         read data from and write data to the slave, thereby emulating a
+         terminal. Typical programs for the master side are telnet servers
+         and xterms.
+
+         Linux has traditionally used the BSD-like names /dev/ptyxx for
+         masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
+         has a number of problems. The GNU C library glibc 2.1 and later,
+         however, supports the Unix98 naming standard: in order to acquire a
+         pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
+         terminal is then made available to the process and the pseudo
+         terminal slave can be accessed as /dev/pts/<number>. What was
+         traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
+
+         All modern Linux systems use the Unix98 ptys.  Say Y unless
+         you're on an embedded system and want to conserve memory.
+
+
+#source "drivers/video/Kconfig"
+
+#config XEN_EVTCHN
+#      bool "Xen Event Channel"
+#      depends on XEN
+#      default Y
+#
+#config XEN_CONSOLE
+#      bool "Xen Console"
+#      depends on XEN && XEN_EVTCHN
+#      default Y
+#      help
+#        Say Y to build a console driver for Xen.
+
+endmenu
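For illustration, the /dev/ptmx handshake described in the UNIX98_PTYS help text above looks like this from userspace. This is a hedged sketch using only standard POSIX calls, not part of this changeset; the slave name it prints (e.g. /dev/pts/2) follows the devpts naming scheme the help text describes:

    /* Sketch of the Unix98 pty flow from the UNIX98_PTYS help text. */
    #define _XOPEN_SOURCE 600
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Opening the master clone device allocates a pty pair. */
        int master = open("/dev/ptmx", O_RDWR | O_NOCTTY);
        if (master < 0) { perror("open /dev/ptmx"); return 1; }
        /* grantpt()/unlockpt() make the slave side accessible. */
        if (grantpt(master) < 0 || unlockpt(master) < 0) {
            perror("grantpt/unlockpt");
            return 1;
        }
        /* The slave pty number is reported back via ptsname(). */
        printf("slave pty: %s\n", ptsname(master)); /* e.g. /dev/pts/2 */
        return 0;
    }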
diff --git a/linux-2.6.7-xen-sparse/arch/xen/Makefile b/linux-2.6.7-xen-sparse/arch/xen/Makefile
new file mode 100644 (file)
index 0000000..e0b8b37
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/arch/xen/Makefile
@@ -0,0 +1,65 @@
+#
+# xen/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to do have actions
+# for "archclean" cleaning up for this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2004 by Christian Limpach
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+# pick up headers from include/asm-xen/asm in preference over include/asm
+NOSTDINC_FLAGS  = -nostdinc -iwithprefix include/asm-xen -Iinclude/asm-xen -iwithprefix include
+
+# make uname return the processor arch
+UTS_MACHINE := $(XENARCH)
+
+core-y += arch/xen/kernel/
+
+drivers-y      += drivers/xen/
+
+include/.asm-ignore:
+       @rm -f include/.asm-ignore
+       @mv include/asm include/.asm-ignore
+       @echo '  SYMLINK include/asm -> include/asm-$(XENARCH)'
+       $(Q)if [ ! -d include ]; then mkdir -p include; fi;
+       @ln -fsn asm-$(XENARCH) include/asm
+
+include/asm-xen/asm:
+       @echo '  SYMLINK $@ -> include/asm-xen/asm-$(XENARCH)'
+       @ln -fsn asm-$(XENARCH) $@
+
+include/asm-xen/asm-$(XENARCH)/hypervisor-ifs:
+       @echo '  SYMLINK $@ -> include/asm-xen/hypervisor-ifs'
+       @ln -fsn ../hypervisor-ifs $@
+
+arch/xen/arch:
+       @rm -f $@
+       @ln -fsn $(XENARCH) $@
+
+prepare: include/.asm-ignore include/asm-xen/asm \
+       include/asm-xen/asm-$(XENARCH)/hypervisor-ifs \
+       arch/xen/arch ;
+
+all: vmlinuz
+
+vmlinuz: vmlinux
+       $(Q)$(MAKE) $(build)=arch/xen/boot vmlinuz
+
+archclean:
+       @if [ -e arch/xen/arch ]; then $(MAKE) $(clean)=arch/xen/arch; fi;
+       @rm -f arch/xen/arch include/.asm-ignore include/asm-xen/asm
+
+define archhelp
+  echo  '* vmlinuz     - Compressed kernel image'
+endef
+
+ifneq ($(XENARCH),)
+include        $(srctree)/arch/xen/$(XENARCH)/Makefile
+endif
diff --git a/linux-2.6.7-xen-sparse/arch/xen/boot/Makefile b/linux-2.6.7-xen-sparse/arch/xen/boot/Makefile
new file mode 100644 (file)
index 0000000..3bedc11
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/arch/xen/boot/Makefile
@@ -0,0 +1,3 @@
+
+vmlinuz: vmlinux FORCE
+       $(call if_changed,gzip)
diff --git a/linux-2.6.7-xen-sparse/arch/xen/defconfig b/linux-2.6.7-xen-sparse/arch/xen/defconfig
new file mode 100644 (file)
index 0000000..bbc2c16
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/arch/xen/defconfig
@@ -0,0 +1,373 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+CONFIG_X86=y
+# CONFIG_X86_64 is not set
+
+#
+# X86 Processor Configuration
+#
+CONFIG_XENARCH="i386"
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUM4=y
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP2 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_HPET_EMULATE_RTC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+CONFIG_X86_CPUID=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set
+CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_REGPARM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_STD_RESOURCES=y
+CONFIG_PC=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_INPUT=y
+CONFIG_UNIX98_PTYS=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_FAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=y
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/Kconfig b/linux-2.6.7-xen-sparse/arch/xen/i386/Kconfig
new file mode 100644 (file)
index 0000000..6014f5b
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/Kconfig
@@ -0,0 +1,784 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+menu "X86 Processor Configuration"
+
+config XENARCH
+       string
+       default i386
+
+config MMU
+       bool
+       default y
+
+config SBUS
+       bool
+
+config UID16
+       bool
+       default y
+
+config GENERIC_ISA_DMA
+       bool
+       default y
+
+
+choice
+       prompt "Processor family"
+       default M686
+
+config M386
+       bool "386"
+       ---help---
+         This is the processor type of your CPU. This information is used for
+         optimizing purposes. In order to compile a kernel that can run on
+         all x86 CPU types (albeit not optimally fast), you can specify
+         "386" here.
+
+         The kernel will not necessarily run on earlier architectures than
+         the one you have chosen, e.g. a Pentium optimized kernel will run on
+         a PPro, but not necessarily on an i486.
+
+         Here are the settings recommended for greatest speed:
+         - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
+         486DLC/DLC2, UMC 486SX-S and NexGen Nx586.  Only "386" kernels
+         will run on a 386 class machine.
+         - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
+         SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
+         - "586" for generic Pentium CPUs lacking the TSC
+         (time stamp counter) register.
+         - "Pentium-Classic" for the Intel Pentium.
+         - "Pentium-MMX" for the Intel Pentium MMX.
+         - "Pentium-Pro" for the Intel Pentium Pro.
+         - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
+         - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
+         - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
+         - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
+         - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
+         - "Crusoe" for the Transmeta Crusoe series.
+         - "Winchip-C6" for original IDT Winchip.
+         - "Winchip-2" for IDT Winchip 2.
+         - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
+         - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
+         - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
+
+         If you don't know what to do, choose "386".
+
+config M486
+       bool "486"
+       help
+         Select this for a 486 series processor, either Intel or one of the
+         compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
+         DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
+         U5S.
+
+config M586
+       bool "586/K5/5x86/6x86/6x86MX"
+       help
+         Select this for an 586 or 686 series processor such as the AMD K5,
+         the Intel 5x86 or 6x86, or the Intel 6x86MX.  This choice does not
+         assume the RDTSC (Read Time Stamp Counter) instruction.
+
+config M586TSC
+       bool "Pentium-Classic"
+       help
+         Select this for a Pentium Classic processor with the RDTSC (Read
+         Time Stamp Counter) instruction for benchmarking.
+
+config M586MMX
+       bool "Pentium-MMX"
+       help
+         Select this for a Pentium with the MMX graphics/multimedia
+         extended instructions.
+
+config M686
+       bool "Pentium-Pro"
+       help
+         Select this for Intel Pentium Pro chips.  This enables the use of
+         Pentium Pro extended instructions, and disables the init-time guard
+         against the f00f bug found in earlier Pentiums.
+
+config MPENTIUMII
+       bool "Pentium-II/Celeron(pre-Coppermine)"
+       help
+         Select this for Intel chips based on the Pentium-II and
+         pre-Coppermine Celeron core.  This option enables an unaligned
+         copy optimization, compiles the kernel with optimization flags
+         tailored for the chip, and applies any applicable Pentium Pro
+         optimizations.
+
+config MPENTIUMIII
+       bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
+       help
+         Select this for Intel chips based on the Pentium-III and
+         Celeron-Coppermine core.  This option enables use of some
+         extended prefetch instructions in addition to the Pentium II
+         extensions.
+
+config MPENTIUMM
+       bool "Pentium M"
+       help
+         Select this for Intel Pentium M (not Pentium-4 M)
+         notebook chips.
+
+config MPENTIUM4
+       bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
+       help
+         Select this for Intel Pentium 4 chips.  This includes the
+         Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
+         (not Pentium M) chips.  This option enables compile flags
+         optimized for the chip, uses the correct cache shift, and
+         applies any applicable Pentium III optimizations.
+
+config MK6
+       bool "K6/K6-II/K6-III"
+       help
+         Select this for an AMD K6-family processor.  Enables use of
+         some extended instructions, and passes appropriate optimization
+         flags to GCC.
+
+config MK7
+       bool "Athlon/Duron/K7"
+       help
+         Select this for an AMD Athlon K7-family processor.  Enables use of
+         some extended instructions, and passes appropriate optimization
+         flags to GCC.
+
+config MK8
+       bool "Opteron/Athlon64/Hammer/K8"
+       help
+         Select this for an AMD Opteron or Athlon64 Hammer-family processor.  Enables
+         use of some extended instructions, and passes appropriate optimization
+         flags to GCC.
+
+config MCRUSOE
+       bool "Crusoe"
+       help
+         Select this for a Transmeta Crusoe processor.  Treats the processor
+         like a 586 with TSC, and sets some GCC optimization flags (like a
+         Pentium Pro with no alignment requirements).
+
+config MWINCHIPC6
+       bool "Winchip-C6"
+       help
+         Select this for an IDT Winchip C6 chip.  Linux and GCC
+         treat this chip as a 586TSC with some extended instructions
+         and alignment requirements.
+
+config MWINCHIP2
+       bool "Winchip-2"
+       help
+         Select this for an IDT Winchip-2.  Linux and GCC
+         treat this chip as a 586TSC with some extended instructions
+         and alignment requirements.
+
+config MWINCHIP3D
+       bool "Winchip-2A/Winchip-3"
+       help
+         Select this for an IDT Winchip-2A or 3.  Linux and GCC
+         treat this chip as a 586TSC with some extended instructions
+         and alignment requirements.  Also enable out of order memory
+         stores for this CPU, which can increase performance of some
+         operations.
+
+config MCYRIXIII
+       bool "CyrixIII/VIA-C3"
+       help
+         Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
+         treat this chip as a generic 586. Whilst the CPU is 686 class,
+         it lacks the cmov extension which gcc assumes is present when
+         generating 686 code.
+         Note that Nehemiah (Model 9) and above will not boot with this
+         kernel due to them lacking the 3DNow! instructions used in earlier
+         incarnations of the CPU.
+
+config MVIAC3_2
+       bool "VIA C3-2 (Nehemiah)"
+       help
+         Select this for a VIA C3 "Nehemiah". Selecting this enables usage
+         of SSE and tells gcc to treat the CPU as a 686.
+         Note, this kernel will not boot on older (pre model 9) C3s.
+
+endchoice
+
+config X86_GENERIC
+       bool "Generic x86 support" 
+       help
+         Instead of just including optimizations for the selected
+         x86 variant (e.g. PII, Crusoe or Athlon), include some more
+         generic optimizations as well. This will make the kernel
+         perform better on x86 CPUs other than that selected.
+
+         This is really intended for distributors who need more
+         generic optimizations.
+
+#
+# Define implied options from the CPU selection here
+#
+config X86_CMPXCHG
+       bool
+       depends on !M386
+       default y
+
+config X86_XADD
+       bool
+       depends on !M386
+       default y
+
+config X86_L1_CACHE_SHIFT
+       int
+       default "7" if MPENTIUM4 || X86_GENERIC
+       default "4" if X86_ELAN || M486 || M386
+       default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2
+       default "6" if MK7 || MK8 || MPENTIUMM
+
+config RWSEM_GENERIC_SPINLOCK
+       bool
+       depends on M386
+       default y
+
+config RWSEM_XCHGADD_ALGORITHM
+       bool
+       depends on !M386
+       default y
+
+config X86_PPRO_FENCE
+       bool
+       depends on M686 || M586MMX || M586TSC || M586 || M486 || M386
+       default y
+
+config X86_F00F_BUG
+       bool
+       depends on M586MMX || M586TSC || M586 || M486 || M386
+       default y
+
+config X86_WP_WORKS_OK
+       bool
+       depends on !M386
+       default y
+
+config X86_INVLPG
+       bool
+       depends on !M386
+       default y
+
+config X86_BSWAP
+       bool
+       depends on !M386
+       default y
+
+config X86_POPAD_OK
+       bool
+       depends on !M386
+       default y
+
+config X86_ALIGNMENT_16
+       bool
+       depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
+       default y
+
+config X86_GOOD_APIC
+       bool
+       depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8
+       default y
+
+config X86_INTEL_USERCOPY
+       bool
+       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7
+       default y
+
+config X86_USE_PPRO_CHECKSUM
+       bool
+       depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2
+       default y
+
+config X86_USE_3DNOW
+       bool
+       depends on MCYRIXIII || MK7
+       default y
+
+config X86_OOSTORE
+       bool
+       depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
+       default y
+
+config HPET_TIMER
+       bool "HPET Timer Support"
+       help
+         This enables the use of the HPET for the kernel's internal timer.
+         HPET is the next generation timer replacing legacy 8254s.
+         You can safely choose Y here.  However, HPET will only be
+         activated if the platform and the BIOS support this feature.
+         Otherwise the 8254 will be used for timing services.
+
+         Choose N to continue using the legacy 8254 timer.
+
+config HPET_EMULATE_RTC
+       def_bool HPET_TIMER && RTC=y
+
+config SMP
+       bool "Symmetric multi-processing support"
+       ---help---
+         This enables support for systems with more than one CPU. If you have
+         a system with only one CPU, like most personal computers, say N. If
+         you have a system with more than one CPU, say Y.
+
+         If you say N here, the kernel will run on single and multiprocessor
+         machines, but will use only one CPU of a multiprocessor machine. If
+         you say Y here, the kernel will run on many, but not all,
+         singleprocessor machines. On a singleprocessor machine, the kernel
+         will run faster if you say N here.
+
+         Note that if you say Y here and choose architecture "586" or
+         "Pentium" under "Processor family", the kernel will not work on 486
+         architectures. Similarly, multiprocessor kernels for the "PPro"
+         architecture may not work on all Pentium based boards.
+
+         People using multiprocessor machines who say Y here should also say
+         Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
+         Management" code will be disabled if you say Y here.
+
+         See also the <file:Documentation/smp.txt>,
+         <file:Documentation/i386/IO-APIC.txt>,
+         <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+         <http://www.tldp.org/docs.html#howto>.
+
+         If you don't know what to do here, say N.
+
+config NR_CPUS
+       int "Maximum number of CPUs (2-255)"
+       range 2 255
+       depends on SMP
+       default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
+       default "8"
+       help
+         This allows you to specify the maximum number of CPUs which this
+         kernel will support.  The maximum supported value is 255 and the
+         minimum value which makes sense is 2.
+
+         This is purely to save memory - each supported CPU adds
+         approximately eight kilobytes to the kernel image.
+
+config SCHED_SMT
+       bool "SMT (Hyperthreading) scheduler support"
+       depends on SMP
+       default n
+       help
+         SMT scheduler support improves the CPU scheduler's decision making
+         when dealing with Intel Pentium 4 chips with HyperThreading at a
+         cost of slightly increased overhead in some places. If unsure say
+         N here.
+
+config PREEMPT
+       bool "Preemptible Kernel"
+       help
+         This option reduces the latency of the kernel when reacting to
+         real-time or interactive events by allowing a low priority process to
+         be preempted even if it is in kernel mode executing a system call.
+         This allows applications to run more reliably even when the system is
+         under load.
+
+         Say Y here if you are building a kernel for a desktop, embedded
+         or real-time system.  Say N if you are unsure.
+
+#config X86_TSC
+#       bool
+#      depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2) && !X86_NUMAQ
+#       default y
+
+#config X86_MCE
+#       bool "Machine Check Exception"
+#      depends on !X86_VOYAGER
+#       ---help---
+#         Machine Check Exception support allows the processor to notify the
+#         kernel if it detects a problem (e.g. overheating, component failure).
+#         The action the kernel takes depends on the severity of the problem,
+#         ranging from a warning message on the console, to halting the machine.
+#         Your processor must be a Pentium or newer to support this - check the
+#         flags in /proc/cpuinfo for mce.  Note that some older Pentium systems
+#         have a design flaw which leads to false MCE events - hence MCE is
+#         disabled on all P5 processors, unless explicitly enabled with "mce"
+#         as a boot argument.  Similarly, if MCE is built in and creates a
+#         problem on some new non-standard machine, you can boot with "nomce"
+#         to disable it.  MCE support simply ignores non-MCE processors like
+#         the 386 and 486, so nearly everyone can say Y here.
+
+#config X86_MCE_NONFATAL
+#      tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
+#       depends on X86_MCE
+#       help
+#         Enabling this feature starts a timer that triggers every 5 seconds which
+#         will look at the machine check registers to see if anything happened.
+#         Non-fatal problems automatically get corrected (but still logged).
+#         Disable this if you don't want to see these messages.
+#         Seeing the messages this option prints out may be indicative of dying hardware,
+#         or out-of-spec (ie, overclocked) hardware.
+#         This option only does something on certain CPUs.
+#         (AMD Athlon/Duron and Intel Pentium 4)
+
+#config X86_MCE_P4THERMAL
+#       bool "check for P4 thermal throttling interrupt."
+#       depends on X86_MCE && (X86_UP_APIC || SMP)
+#       help
+#         Enabling this feature will cause a message to be printed when the P4
+#         enters thermal throttling.
+
+#config MICROCODE
+#       tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
+#       ---help---
+#         If you say Y here and also to "/dev file system support" in the
+#         'File systems' section, you will be able to update the microcode on
+#         Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
+#         Pentium III, Pentium 4, Xeon etc.  You will obviously need the
+#         actual microcode binary data itself which is not shipped with the
+#         Linux kernel.
+#
+#         For latest news and information on obtaining all the required
+#         ingredients for this driver, check:
+#         <http://www.urbanmyth.org/microcode/>.
+#
+#         To compile this driver as a module, choose M here: the
+#         module will be called microcode.
+
+#config X86_MSR
+#       tristate "/dev/cpu/*/msr - Model-specific register support"
+#       help
+#         This device gives privileged processes access to the x86
+#         Model-Specific Registers (MSRs).  It is a character device with
+#         major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
+#         MSR accesses are directed to a specific CPU on multi-processor
+#         systems.
+
+config X86_CPUID
+       tristate "/dev/cpu/*/cpuid - CPU information support"
+       help
+         This device gives processes access to the x86 CPUID instruction to
+         be executed on a specific processor.  It is a character device
+         with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+         /dev/cpu/31/cpuid.
+
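For illustration, the X86_CPUID character device described above can be exercised from userspace as follows. This is a hedged sketch, not part of the changeset; it assumes a /dev/cpu/0/cpuid node exists and that, as in the mainline cpuid driver of this era, the file offset selects the CPUID level and each read returns 16 bytes (EAX, EBX, ECX, EDX):

    /* Sketch: query CPUID leaf 0 via /dev/cpu/0/cpuid (char major 203). */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint32_t regs[4]; /* EAX, EBX, ECX, EDX of the requested leaf */
        int fd = open("/dev/cpu/0/cpuid", O_RDONLY);
        if (fd < 0) { perror("open"); return 1; }
        /* The file offset is the CPUID level; leaf 0 is read here. */
        if (pread(fd, regs, sizeof(regs), 0) != (ssize_t)sizeof(regs)) {
            perror("pread"); close(fd); return 1;
        }
        printf("max basic leaf: %u\n", regs[0]);
        close(fd);
        return 0;
    }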
+source "drivers/firmware/Kconfig"
+
+choice
+       prompt "High Memory Support"
+       default NOHIGHMEM
+
+config NOHIGHMEM
+       bool "off"
+       ---help---
+         Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+         However, the address space of 32-bit x86 processors is only 4
+         Gigabytes large. That means that, if you have a large amount of
+         physical memory, not all of it can be "permanently mapped" by the
+         kernel. The physical memory that's not permanently mapped is called
+         "high memory".
+
+         If you are compiling a kernel which will never run on a machine with
+         more than 1 Gigabyte total physical RAM, answer "off" here (default
+         choice and suitable for most users). This will result in a "3GB/1GB"
+         split: 3GB are mapped so that each process sees a 3GB virtual memory
+         space and the remaining part of the 4GB virtual memory space is used
+         by the kernel to permanently map as much physical memory as
+         possible.
+
+         If the machine has between 1 and 4 Gigabytes physical RAM, then
+         answer "4GB" here.
+
+         If more than 4 Gigabytes is used then answer "64GB" here. This
+         selection turns Intel PAE (Physical Address Extension) mode on.
+         PAE implements 3-level paging on IA32 processors. PAE is fully
+         supported by Linux, PAE mode is implemented on all recent Intel
+         processors (Pentium Pro and better). NOTE: If you say "64GB" here,
+         then the kernel will not boot on CPUs that don't support PAE!
+
+         The actual amount of total physical memory will either be
+         auto detected or can be forced by using a kernel command line option
+         such as "mem=256M". (Try "man bootparam" or see the documentation of
+         your boot loader (lilo or loadlin) about how to pass options to the
+         kernel at boot time.)
+
+         If unsure, say "off".
+
+config HIGHMEM4G
+       bool "4GB"
+       help
+         Select this if you have a 32-bit processor and between 1 and 4
+         gigabytes of physical RAM.
+
+config HIGHMEM64G
+       bool "64GB"
+       help
+         Select this if you have a 32-bit processor and more than 4
+         gigabytes of physical RAM.
+
+endchoice
+
+config HIGHMEM
+       bool
+       depends on HIGHMEM64G || HIGHMEM4G
+       default y
+
+config X86_PAE
+       bool
+       depends on HIGHMEM64G
+       default y
+
+# Common NUMA Features
+config NUMA
+       bool "Numa Memory Allocation and Scheduler Support"
+       depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
+       default n if X86_PC
+       default y if (X86_NUMAQ || X86_SUMMIT)
+
+# Need comments to help the hapless user trying to turn on NUMA support
+comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
+       depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
+
+comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
+       depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
+
+config DISCONTIGMEM
+       bool
+       depends on NUMA
+       default y
+
+config HAVE_ARCH_BOOTMEM_NODE
+       bool
+       depends on NUMA
+       default y
+
+config HIGHPTE
+       bool "Allocate 3rd-level pagetables from highmem"
+       depends on HIGHMEM4G || HIGHMEM64G
+       help
+         The VM uses one page table entry for each page of physical memory.
+         For systems with a lot of RAM, this can be wasteful of precious
+         low memory.  Setting this option will put user-space page table
+         entries in high memory.
+
+#config MTRR
+#       bool "MTRR (Memory Type Range Register) support"
+#       ---help---
+#         On Intel P6 family processors (Pentium Pro, Pentium II and later)
+#         the Memory Type Range Registers (MTRRs) may be used to control
+#         processor access to memory ranges. This is most useful if you have
+#         a video (VGA) card on a PCI or AGP bus. Enabling write-combining
+#         allows bus write transfers to be combined into a larger transfer
+#         before bursting over the PCI/AGP bus. This can increase performance
+#         of image write operations 2.5 times or more. Saying Y here creates a
+#         /proc/mtrr file which may be used to manipulate your processor's
+#         MTRRs. Typically the X server should use this.
+#
+#         This code has a reasonably generic interface so that similar
+#         control registers on other processors can be easily supported
+#         as well:
+#
+#         The Cyrix 6x86, 6x86MX and M II processors have Address Range
+#         Registers (ARRs) which provide a similar functionality to MTRRs. For
+#         these, the ARRs are used to emulate the MTRRs.
+#         The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
+#         MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
+#         write-combining. All of these processors are supported by this code
+#         and it makes sense to say Y here if you have one of them.
+#
+#         Saying Y here also fixes a problem with buggy SMP BIOSes which only
+#         set the MTRRs for the boot CPU and not for the secondary CPUs. This
+#         can lead to all sorts of problems, so it's good to say Y here.
+#
+#         You can safely say Y even if your machine doesn't have MTRRs, you'll
+#         just add about 9 KB to your kernel.
+#
+#         See <file:Documentation/mtrr.txt> for more information.
+
+config IRQBALANCE
+       bool "Enable kernel irq balancing"
+       depends on SMP && X86_IO_APIC
+       default y
+       help
+         The default yes will allow the kernel to do irq load balancing.
+         Saying no will keep the kernel from doing irq load balancing.
+
+config HAVE_DEC_LOCK
+       bool
+       depends on (SMP || PREEMPT) && X86_CMPXCHG
+       default y
+
+# turning this on wastes a bunch of space.
+# Summit needs it only when NUMA is on
+config BOOT_IOREMAP
+       bool
+       depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
+       default y
+
+config REGPARM
+       bool "Use register arguments (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       default n
+       help
+       Compile the kernel with -mregparm=3. This uses a different ABI
+       and passes the first three arguments of a function call in registers.
+       This will probably break binary only modules.
+
+       This feature is only enabled for gcc-3.0 and later - earlier compilers
+       generate incorrect output with certain kernel constructs when
+       -mregparm=3 is used.
+
+
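The REGPARM option above applies GCC's register-argument ABI to the whole kernel via -mregparm=3; the same convention can be requested per-function with GCC's regparm attribute. A hedged, illustrative sketch (compile with gcc -m32; i386 only, not part of this changeset):

    /* Sketch of the regparm(3) ABI that CONFIG_REGPARM applies kernel-wide. */
    #include <stdio.h>

    /* With regparm(3), a, b and c arrive in EAX, EDX and ECX
       rather than on the stack. */
    static int __attribute__((regparm(3))) add3(int a, int b, int c)
    {
        return a + b + c;
    }

    int main(void)
    {
        printf("%d\n", add3(1, 2, 3)); /* prints 6 */
        return 0;
    }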
+menu "Kernel hacking"
+
+config DEBUG_KERNEL
+       bool "Kernel debugging"
+       help
+         Say Y here if you are developing drivers or trying to debug and
+         identify kernel problems.
+
+config EARLY_PRINTK
+       bool "Early printk" if EMBEDDED
+       default y
+       help
+         Write kernel log output directly into the VGA buffer or to a serial
+         port.
+
+         This is useful for kernel debugging when your machine crashes very
+         early, before the console code is initialized. For normal operation
+         it is not recommended because it looks ugly and doesn't cooperate
+         with klogd/syslogd or the X server. You should normally say N here,
+         unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+       bool "Check for stack overflows"
+       depends on DEBUG_KERNEL
+
+config DEBUG_STACK_USAGE
+       bool "Stack utilization instrumentation"
+       depends on DEBUG_KERNEL
+       help
+         Enables the display of the minimum amount of free stack which each
+         task has ever had available in the sysrq-T and sysrq-P debug output.
+
+         This option will slow down process creation somewhat.
+
+config DEBUG_SLAB
+       bool "Debug memory allocations"
+       depends on DEBUG_KERNEL
+       help
+         Say Y here to have the kernel do limited verification on memory
+         allocation as well as poisoning memory on free to catch use of freed
+         memory.
+
+config MAGIC_SYSRQ
+       bool "Magic SysRq key"
+       depends on DEBUG_KERNEL
+       help
+         If you say Y here, you will have some control over the system even
+         if the system crashes, for example during kernel debugging (e.g., you
+         will be able to flush the buffer cache to disk, reboot the system
+         immediately or dump some status information). This is accomplished
+         by pressing various keys while holding SysRq (Alt+PrintScreen). It
+         also works on a serial console (on PC hardware at least), if you
+         send a BREAK and then within 5 seconds a command keypress. The
+         keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
+         unless you really know what this hack does.
+
+config DEBUG_SPINLOCK
+       bool "Spinlock debugging"
+       depends on DEBUG_KERNEL
+       help
+         Say Y here and build an SMP kernel to catch missing spinlock
+         initialization and certain other commonly made spinlock errors.  This is
+         best used in conjunction with the NMI watchdog so that spinlock
+         deadlocks are also debuggable.
+
+config DEBUG_PAGEALLOC
+       bool "Page alloc debugging"
+       depends on DEBUG_KERNEL
+       help
+         Unmap pages from the kernel linear mapping after free_pages().
+         This results in a large slowdown, but helps to find certain types
+         of memory corruptions.
+
+config DEBUG_HIGHMEM
+       bool "Highmem debugging"
+       depends on DEBUG_KERNEL && HIGHMEM
+       help
+         This option enables additional error checking for high-memory
+         systems. Disable it for production systems.
+
+config DEBUG_INFO
+       bool "Compile the kernel with debug info"
+       depends on DEBUG_KERNEL
+       help
+         If you say Y here the resulting kernel image will include
+         debugging info, resulting in a larger kernel image.
+         Say Y here only if you plan to use gdb to debug the kernel.
+         If you don't debug the kernel, you can say N.
+
+config DEBUG_SPINLOCK_SLEEP
+       bool "Sleep-inside-spinlock checking"
+       help
+         If you say Y here, various routines which may sleep will become very
+         noisy if they are called with a spinlock held.
+
+config FRAME_POINTER
+       bool "Compile the kernel with frame pointers"
+       help
+         If you say Y here the resulting kernel image will be slightly larger
+         and slower, but it will give very useful debugging information.
+         If you don't debug the kernel, you can say N, but we may not be able
+         to solve problems without frame pointers.
+
+config 4KSTACKS
+       bool "Use 4Kb for kernel stacks instead of 8Kb"
+       help
+         If you say Y here the kernel will use a 4Kb stacksize for the
+         kernel stack attached to each process/thread. This facilitates
+         running more threads on a system and also reduces the pressure
+         on the VM subsystem for higher order allocations. This option
+         will also use IRQ stacks to compensate for the reduced stackspace.
+
+config X86_FIND_SMP_CONFIG
+       bool
+       depends on X86_LOCAL_APIC || X86_VOYAGER
+       default y
+
+config X86_MPPARSE
+       bool
+       depends on X86_LOCAL_APIC && !X86_VISWS
+       default y
+
+endmenu
+
+config X86_SMP
+       bool
+       depends on SMP && !X86_VOYAGER
+       default y
+
+config X86_HT
+       bool
+       depends on SMP && !(X86_VISWS || X86_VOYAGER)
+       default y
+
+config X86_BIOS_REBOOT
+       bool
+       depends on !(X86_VISWS || X86_VOYAGER)
+       default y
+
+config X86_TRAMPOLINE
+       bool
+       depends on X86_SMP || (X86_VOYAGER && SMP)
+       default y
+
+# std_resources is overridden for pc9800, but that's not
+# a currently selectable arch choice
+config X86_STD_RESOURCES
+       bool
+       default y
+
+config PC
+       bool
+       depends on X86 && !EMBEDDED
+       default y
+
+endmenu
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/Makefile b/linux-2.6.7-xen-sparse/arch/xen/i386/Makefile
new file mode 100644 (file)
index 0000000..c06c8af
--- /dev/null
@@ -0,0 +1,97 @@
+#
+# i386/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" that clean up for this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+# 19990713  Artur Skawina <skawina@geocities.com>
+#           Added '-march' and '-mpreferred-stack-boundary' support
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+LDFLAGS                := -m elf_i386
+LDFLAGS_vmlinux :=
+CHECK          := $(CHECK) -D__i386__=1
+
+CFLAGS += -pipe -msoft-float
+
+# prevent gcc from keeping the stack 16-byte aligned
+CFLAGS += $(call check_gcc,-mpreferred-stack-boundary=2,)
+
+align := $(subst -functions=0,,$(call check_gcc,-falign-functions=0,-malign-functions=0))
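+
+# Descriptive note: check_gcc comes from the top-level kernel Makefile;
+# $(call check_gcc,<flag>,<fallback>) expands to <flag> when $(CC) accepts
+# it and to <fallback> otherwise, e.g. the CONFIG_MPENTIUM4 line below
+# resolves to -march=pentium4 on compilers that know the flag and to
+# -march=i686 elsewhere.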
+
+cflags-$(CONFIG_M386)          += -march=i386
+cflags-$(CONFIG_M486)          += -march=i486
+cflags-$(CONFIG_M586)          += -march=i586
+cflags-$(CONFIG_M586TSC)       += -march=i586
+cflags-$(CONFIG_M586MMX)       += $(call check_gcc,-march=pentium-mmx,-march=i586)
+cflags-$(CONFIG_M686)          += -march=i686
+cflags-$(CONFIG_MPENTIUMII)    += $(call check_gcc,-march=pentium2,-march=i686)
+cflags-$(CONFIG_MPENTIUMIII)   += $(call check_gcc,-march=pentium3,-march=i686)
+cflags-$(CONFIG_MPENTIUMM)     += $(call check_gcc,-march=pentium3,-march=i686)
+cflags-$(CONFIG_MPENTIUM4)     += $(call check_gcc,-march=pentium4,-march=i686)
+cflags-$(CONFIG_MK6)           += -march=k6
+# Please note that patches adding -march=athlon-xp and friends are pointless.
+# They make zero difference whatsoever to performance at this time.
+cflags-$(CONFIG_MK7)           += $(call check_gcc,-march=athlon,-march=i686 $(align)-functions=4)
+cflags-$(CONFIG_MK8)           += $(call check_gcc,-march=k8,$(call check_gcc,-march=athlon,-march=i686 $(align)-functions=4))
+cflags-$(CONFIG_MCRUSOE)       += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MWINCHIPC6)    += $(call check_gcc,-march=winchip-c6,-march=i586)
+cflags-$(CONFIG_MWINCHIP2)     += $(call check_gcc,-march=winchip2,-march=i586)
+cflags-$(CONFIG_MWINCHIP3D)    += $(call check_gcc,-march=winchip2,-march=i586)
+cflags-$(CONFIG_MCYRIXIII)     += $(call check_gcc,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MVIAC3_2)      += $(call check_gcc,-march=c3-2,-march=i686)
+
+# AMD Elan support
+cflags-$(CONFIG_X86_ELAN)      += -march=i486
+
+# -mregparm=3 works ok on gcc-3.0 and later
+#
+GCC_VERSION                    := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
+cflags-$(CONFIG_REGPARM)       += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
+
+# Disable unit-at-a-time mode; it makes gcc use a lot more stack
+# due to the lack of sharing of stack slots.
+CFLAGS += $(call check_gcc,-fno-unit-at-a-time,)
+
+CFLAGS += $(cflags-y)
+
+head-y := arch/xen/i386/kernel/head.o arch/xen/i386/kernel/init_task.o
+
+libs-y                                         += arch/i386/lib/
+core-y                                 += arch/xen/i386/kernel/ \
+                                          arch/xen/i386/mm/
+# \
+#                                         arch/xen/$(mcore-y)/
+drivers-$(CONFIG_MATH_EMULATION)       += arch/i386/math-emu/
+drivers-$(CONFIG_PCI)                  += arch/i386/pci/
+# must be linked after kernel/
+drivers-$(CONFIG_OPROFILE)             += arch/i386/oprofile/
+drivers-$(CONFIG_PM)                   += arch/i386/power/
+
+# for clean
+obj-   += kernel/
+#obj-  += ../../i386/lib/ ../../i386/mm/ 
+#../../i386/$(mcore-y)/
+#obj-  += ../../i386/pci/ ../../i386/oprofile/ ../../i386/power/
+
+xenflags-y += -Iinclude/asm-xen/asm-i386/mach-xen
+CFLAGS += $(xenflags-y)
+AFLAGS += $(xenflags-y)
+
+prepare: include/asm-$(XENARCH)/asm_offsets.h
+CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
+
+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
+       include/linux/version.h include/config/MARKER
+
+include/asm-$(XENARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
+       $(call filechk,gen-asm-offsets)
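+
+# Descriptive note: gen-asm-offsets (a filechk rule from the top-level
+# Makefile) post-processes the compiler-generated asm-offsets.s into a
+# header of #define constants, so assembly such as entry.S can refer to C
+# structure offsets (TI_flags and friends) symbolically.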
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/Makefile b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/Makefile
new file mode 100644 (file)
index 0000000..e844047
--- /dev/null
@@ -0,0 +1,94 @@
+#
+# Makefile for the linux kernel.
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/kernel
+
+extra-y := head.o init_task.o vmlinux.lds.s
+
+obj-y  := traps.o irq.o ldt.o setup.o entry.o time.o process.o signal.o
+
+c-obj-y        := semaphore.o vm86.o \
+               ptrace.o ioport.o sys_i386.o \
+               pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
+               doublefault.o
+s-obj-y        :=
+
+#obj-y                         += hypervisor.o
+obj-y                          += evtchn.o
+
+obj-y                          += cpu/
+obj-y                          += timers/
+c-obj-$(CONFIG_ACPI_BOOT)      += acpi/
+#c-obj-$(CONFIG_X86_BIOS_REBOOT)       += reboot.o
+c-obj-$(CONFIG_MCA)            += mca.o
+c-obj-$(CONFIG_X86_MSR)                += msr.o
+c-obj-$(CONFIG_X86_CPUID)      += cpuid.o
+c-obj-$(CONFIG_MICROCODE)      += microcode.o
+c-obj-$(CONFIG_APM)            += apm.o
+c-obj-$(CONFIG_X86_SMP)                += smp.o smpboot.o
+c-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
+c-obj-$(CONFIG_X86_MPPARSE)    += mpparse.o
+c-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+c-obj-$(CONFIG_X86_IO_APIC)    += io_apic.o
+c-obj-$(CONFIG_X86_NUMAQ)      += numaq.o
+c-obj-$(CONFIG_X86_SUMMIT_NUMA)        += summit.o
+c-obj-$(CONFIG_MODULES)                += module.o
+obj-y                          += sysenter.o
+obj-y                          += vsyscall.o
+c-obj-$(CONFIG_ACPI_SRAT)      += srat.o
+c-obj-$(CONFIG_HPET_TIMER)     += time_hpet.o
+c-obj-$(CONFIG_EFI)            += efi.o efi_stub.o
+c-obj-$(CONFIG_EARLY_PRINTK)   += early_printk.o
+c-obj-$(CONFIG_X86_STD_RESOURCES)      += std_resources.o
+
+EXTRA_AFLAGS   := -traditional
+
+c-obj-$(CONFIG_SCx200)         += scx200.o
+
+AFLAGS_vmlinux.lds.o += -U$(XENARCH)
+
+# vsyscall.o contains the vsyscall DSO images as __initdata.
+# We must build both images before we can assemble it.
+# Note: kbuild does not track this dependency due to usage of .incbin
+$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
+targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
+
+# The DSO images are built using a special linker script.
+quiet_cmd_syscall = SYSCALL $@
+      cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
+                         -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
+SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
+SYSCFLAGS_vsyscall-int80.so    = $(vsyscall-flags)
+
+$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
+$(obj)/vsyscall-%.so: $(obj)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+       $(call if_changed,syscall)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO.  With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
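+# (ld -R, i.e. --just-symbols, reads symbol values from the named object
+# without linking its contents, so vsyscall-syms.o contributes addresses
+# only.)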
+extra-y += vsyscall-syms.o
+$(obj)/built-in.o: $(obj)/vsyscall-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
+
+SYSCFLAGS_vsyscall-syms.o = -r
+$(obj)/vsyscall-syms.o: $(obj)/vsyscall.lds $(obj)/vsyscall-sysenter.o FORCE
+       $(call if_changed,syscall)
+
+c-link := init_task.o
+s-link := vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o
+
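+# Descriptive note: the c-obj-y/s-obj-y sources are unmodified i386 files;
+# the pattern rule below symlinks each of them from arch/i386/kernel into
+# this directory at build time, and the clean-files entries remove the
+# generated links again.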
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
+       @ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
+
+$(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
+
+obj-y  += $(c-obj-y) $(s-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/Makefile b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
new file mode 100644 (file)
index 0000000..478e023
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Makefile for x86-compatible CPU details and quirks
+#
+
+CFLAGS += -Iarch/i386/kernel/cpu
+
+obj-y  :=      common.o
+c-obj-y        +=      proc.o
+
+c-obj-y        +=      amd.o
+c-obj-y        +=      cyrix.o
+c-obj-y        +=      centaur.o
+c-obj-y        +=      transmeta.o
+c-obj-y        +=      intel.o
+c-obj-y        +=      rise.o
+c-obj-y        +=      nexgen.o
+c-obj-y        +=      umc.o
+
+#obj-$(CONFIG_X86_MCE) +=      ../../../../i386/kernel/cpu/mcheck/
+
+#obj-$(CONFIG_MTRR)    +=      ../../../../i386/kernel/cpu/mtrr/
+#obj-$(CONFIG_CPU_FREQ)        +=      ../../../../i386/kernel/cpu/cpufreq/
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+       @ln -fsn $(srctree)/arch/i386/kernel/cpu/$(notdir $@) $@
+
+obj-y  += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/common.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/common.c
new file mode 100644 (file)
index 0000000..57d49ec
--- /dev/null
@@ -0,0 +1,597 @@
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+
+#include "cpu.h"
+
+static int cachesize_override __initdata = -1;
+static int disable_x86_fxsr __initdata = 0;
+static int disable_x86_serial_nr __initdata = 1;
+
+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+
+extern void mcheck_init(struct cpuinfo_x86 *c);
+
+extern int disable_pse;
+
+static void default_init(struct cpuinfo_x86 * c)
+{
+       /* Not much we can do here... */
+       /* Check if it at least has cpuid */
+       if (c->cpuid_level == -1) {
+               /* No cpuid. It must be an ancient CPU */
+               if (c->x86 == 4)
+                       strcpy(c->x86_model_id, "486");
+               else if (c->x86 == 3)
+                       strcpy(c->x86_model_id, "386");
+       }
+}
+
+static struct cpu_dev default_cpu = {
+       .c_init = default_init,
+};
+static struct cpu_dev * this_cpu = &default_cpu;
+
+static int __init cachesize_setup(char *str)
+{
+       get_option (&str, &cachesize_override);
+       return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+int __init get_model_name(struct cpuinfo_x86 *c)
+{
+       unsigned int *v;
+       char *p, *q;
+
+       if (cpuid_eax(0x80000000) < 0x80000004)
+               return 0;
+
+       v = (unsigned int *) c->x86_model_id;
+       cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+       cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+       cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+       c->x86_model_id[48] = 0;
+
+       /* Intel chips right-justify this string for some dumb reason;
+          undo that brain damage */
+       p = q = &c->x86_model_id[0];
+       while ( *p == ' ' )
+            p++;
+       if ( p != q ) {
+            while ( *p )
+                 *q++ = *p++;
+            while ( q <= &c->x86_model_id[48] )
+                 *q++ = '\0';  /* Zero-pad the rest */
+       }
+
+       return 1;
+}
+
+
+void __init display_cacheinfo(struct cpuinfo_x86 *c)
+{
+       unsigned int n, dummy, ecx, edx, l2size;
+
+       n = cpuid_eax(0x80000000);
+
+       if (n >= 0x80000005) {
+               cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+               printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+               c->x86_cache_size=(ecx>>24)+(edx>>24);  
+       }
+
+       if (n < 0x80000006)     /* Some chips just have a large L1. */
+               return;
+
+       ecx = cpuid_ecx(0x80000006);
+       l2size = ecx >> 16;
+       
+       /* do processor-specific cache resizing */
+       if (this_cpu->c_size_cache)
+               l2size = this_cpu->c_size_cache(c,l2size);
+
+       /* Allow user to override all this if necessary. */
+       if (cachesize_override != -1)
+               l2size = cachesize_override;
+
+       if ( l2size == 0 )
+               return;         /* Again, no L2 cache is possible */
+
+       c->x86_cache_size = l2size;
+
+       printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+              l2size, ecx & 0xFF);
+}
+
+/* Naming convention should be: <Name> [(<Codename>)] */
+/* This table is only used if init_<vendor>() below doesn't set the name; */
+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+
+/* Look up CPU names by table lookup. */
+static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+{
+       struct cpu_model_info *info;
+
+       if ( c->x86_model >= 16 )
+               return NULL;    /* Range check */
+
+       if (!this_cpu)
+               return NULL;
+
+       info = this_cpu->c_models;
+
+       while (info && info->family) {
+               if (info->family == c->x86)
+                       return info->model_names[c->x86_model];
+               info++;
+       }
+       return NULL;            /* Not found */
+}
+
+
+void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+{
+       char *v = c->x86_vendor_id;
+       int i;
+
+       for (i = 0; i < X86_VENDOR_NUM; i++) {
+               if (cpu_devs[i]) {
+                       if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+                           (cpu_devs[i]->c_ident[1] && 
+                            !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+                               c->x86_vendor = i;
+                               if (!early)
+                                       this_cpu = cpu_devs[i];
+                               break;
+                       }
+               }
+       }
+}
+
+
+static int __init x86_fxsr_setup(char * s)
+{
+       disable_x86_fxsr = 1;
+       return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+       u32 f1, f2;
+
+       asm("pushfl\n\t"
+           "pushfl\n\t"
+           "popl %0\n\t"
+           "movl %0,%1\n\t"
+           "xorl %2,%0\n\t"
+           "pushl %0\n\t"
+           "popfl\n\t"
+           "pushfl\n\t"
+           "popl %0\n\t"
+           "popfl\n\t"
+           : "=&r" (f1), "=&r" (f2)
+           : "ir" (flag));
+
+       return ((f1^f2) & flag) != 0;
+}
+
+
+/* Probe for the CPUID instruction */
+int __init have_cpuid_p(void)
+{
+       return flag_is_changeable_p(X86_EFLAGS_ID);
+}
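+
+/* Illustrative note: this is the classic CPUID-detection idiom -- EFLAGS
+   bit 21 (X86_EFLAGS_ID) can be toggled by software only on processors
+   that implement the CPUID instruction, so flag_is_changeable_p() flips
+   the bit via pushfl/popfl and compares the before/after values. */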
+
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+   The others are not touched to avoid unwanted side effects. */
+void __init early_cpu_detect(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       c->x86_cache_alignment = 32;
+
+       if (!have_cpuid_p())
+               return;
+
+       /* Get vendor name */
+       cpuid(0x00000000, &c->cpuid_level,
+             (int *)&c->x86_vendor_id[0],
+             (int *)&c->x86_vendor_id[8],
+             (int *)&c->x86_vendor_id[4]);
+
+       get_cpu_vendor(c, 1);
+
+       c->x86 = 4;
+       if (c->cpuid_level >= 0x00000001) {
+               u32 junk, tfms, cap0, misc;
+               cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+               c->x86 = (tfms >> 8) & 15;
+               c->x86_model = (tfms >> 4) & 15;
+               if (c->x86 == 0xf) {
+                       c->x86 += (tfms >> 20) & 0xff;
+                       c->x86_model += ((tfms >> 16) & 0xF) << 4;
+               }
+               c->x86_mask = tfms & 15;
+               if (cap0 & (1<<19))
+                       c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+       }
+
+       early_intel_workaround(c);
+}
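+
+/* Worked example (illustrative): tfms = 0x00000f29 gives a base family of
+   0xf, so the extended family bits (tfms >> 20) and the extended model
+   (((tfms >> 16) & 0xf) << 4) are folded in, yielding family 15, model 2,
+   stepping 9 -- an early Pentium 4 signature. */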
+
+void __init generic_identify(struct cpuinfo_x86 * c)
+{
+       u32 tfms, xlvl;
+       int junk;
+
+       if (have_cpuid_p()) {
+               /* Get vendor name */
+               cpuid(0x00000000, &c->cpuid_level,
+                     (int *)&c->x86_vendor_id[0],
+                     (int *)&c->x86_vendor_id[8],
+                     (int *)&c->x86_vendor_id[4]);
+               
+               get_cpu_vendor(c, 0);
+               /* Initialize the standard set of capabilities */
+               /* Note that the vendor-specific code below might override */
+       
+               /* Intel-defined flags: level 0x00000001 */
+               if ( c->cpuid_level >= 0x00000001 ) {
+                       u32 capability, excap;
+                       cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+                       c->x86_capability[0] = capability;
+                       c->x86_capability[4] = excap;
+                       c->x86 = (tfms >> 8) & 15;
+                       c->x86_model = (tfms >> 4) & 15;
+                       if (c->x86 == 0xf) {
+                               c->x86 += (tfms >> 20) & 0xff;
+                               c->x86_model += ((tfms >> 16) & 0xF) << 4;
+                       } 
+                       c->x86_mask = tfms & 15;
+               } else {
+                       /* Have CPUID level 0 only - unheard of */
+                       c->x86 = 4;
+               }
+
+               /* AMD-defined flags: level 0x80000001 */
+               xlvl = cpuid_eax(0x80000000);
+               if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+                       if ( xlvl >= 0x80000001 )
+                               c->x86_capability[1] = cpuid_edx(0x80000001);
+                       if ( xlvl >= 0x80000004 )
+                               get_model_name(c); /* Default name */
+               }
+       }
+}
+
+static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+               /* Disable processor serial number */
+               unsigned long lo,hi;
+               rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+               lo |= 0x200000;
+               wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+               printk(KERN_NOTICE "CPU serial number disabled.\n");
+               clear_bit(X86_FEATURE_PN, c->x86_capability);
+
+               /* Disabling the serial number may affect the cpuid level */
+               c->cpuid_level = cpuid_eax(0);
+       }
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+       disable_x86_serial_nr = 0;
+       return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+       int i;
+
+       c->loops_per_jiffy = loops_per_jiffy;
+       c->x86_cache_size = -1;
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
+       c->cpuid_level = -1;    /* CPUID not detected */
+       c->x86_model = c->x86_mask = 0; /* So far unknown... */
+       c->x86_vendor_id[0] = '\0'; /* Unset */
+       c->x86_model_id[0] = '\0';  /* Unset */
+       memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+       if (!have_cpuid_p()) {
+               /* First of all, decide if this is a 486 or higher */
+               /* It's a 486 if we can modify the AC flag */
+               if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+                       c->x86 = 4;
+               else
+                       c->x86 = 3;
+       }
+
+       generic_identify(c);
+
+       printk(KERN_DEBUG "CPU:     After generic identify, caps: %08lx %08lx %08lx %08lx\n",
+               c->x86_capability[0],
+               c->x86_capability[1],
+               c->x86_capability[2],
+               c->x86_capability[3]);
+
+       if (this_cpu->c_identify) {
+               this_cpu->c_identify(c);
+
+               printk(KERN_DEBUG "CPU:     After vendor identify, caps: %08lx %08lx %08lx %08lx\n",
+                       c->x86_capability[0],
+                       c->x86_capability[1],
+                       c->x86_capability[2],
+                       c->x86_capability[3]);
+       }
+
+       /*
+        * Vendor-specific initialization.  In this section we
+        * canonicalize the feature flags, meaning that if there are
+        * features a certain CPU supports which CPUID doesn't report,
+        * or CPUID claims incorrect flags, or there are other bugs,
+        * we handle them here.
+        *
+        * At the end of this section, c->x86_capability better
+        * indicate the features this CPU genuinely supports!
+        */
+       if (this_cpu->c_init)
+               this_cpu->c_init(c);
+
+       /* Disable the PN if appropriate */
+       squash_the_stupid_serial_number(c);
+
+       /*
+        * The vendor-specific functions might have changed features.  Now
+        * we do "generic changes."
+        */
+
+       /* TSC disabled? */
+       if ( tsc_disable )
+               clear_bit(X86_FEATURE_TSC, c->x86_capability);
+
+       /* FXSR disabled? */
+       if (disable_x86_fxsr) {
+               clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+               clear_bit(X86_FEATURE_XMM, c->x86_capability);
+       }
+
+       if (disable_pse)
+               clear_bit(X86_FEATURE_PSE, c->x86_capability);
+
+       /* If the model name is still unset, do table lookup. */
+       if ( !c->x86_model_id[0] ) {
+               char *p;
+               p = table_lookup_model(c);
+               if ( p )
+                       strcpy(c->x86_model_id, p);
+               else
+                       /* Last resort... */
+                       sprintf(c->x86_model_id, "%02x/%02x",
+                               c->x86_vendor, c->x86_model);
+       }
+
+       /* Now the feature flags better reflect actual CPU features! */
+
+       printk(KERN_DEBUG "CPU:     After all inits, caps: %08lx %08lx %08lx %08lx\n",
+              c->x86_capability[0],
+              c->x86_capability[1],
+              c->x86_capability[2],
+              c->x86_capability[3]);
+
+       /*
+        * On SMP, boot_cpu_data holds the common feature set between
+        * all CPUs; so make sure that we indicate which features are
+        * common between the CPUs.  The first time this routine gets
+        * executed, c == &boot_cpu_data.
+        */
+       if ( c != &boot_cpu_data ) {
+               /* AND the already accumulated flags with these */
+               for ( i = 0 ; i < NCAPINTS ; i++ )
+                       boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+       }
+
+       /* Init Machine Check Exception if available. */
+#ifdef CONFIG_X86_MCE
+       mcheck_init(c);
+#endif
+}
+/*
+ *     Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
+ */
+void __init dodgy_tsc(void)
+{
+       if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
+           ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
+               cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
+}
+
+void __init print_cpu_info(struct cpuinfo_x86 *c)
+{
+       char *vendor = NULL;
+
+       if (c->x86_vendor < X86_VENDOR_NUM)
+               vendor = this_cpu->c_vendor;
+       else if (c->cpuid_level >= 0)
+               vendor = c->x86_vendor_id;
+
+       if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+               printk("%s ", vendor);
+
+       if (!c->x86_model_id[0])
+               printk("%d86", c->x86);
+       else
+               printk("%s", c->x86_model_id);
+
+       if (c->x86_mask || c->cpuid_level >= 0) 
+               printk(" stepping %02x\n", c->x86_mask);
+       else
+               printk("\n");
+}
+
+unsigned long cpu_initialized __initdata = 0;
+
+/* This is hacky. :)
+ * We're emulating future behavior.
+ * In the future, the cpu-specific init functions will be called implicitly
+ * via the magic of initcalls.
+ * They will insert themselves into the cpu_devs structure.
+ * Then, when cpu_init() is called, we can just iterate over that array.
+ */
+
+extern int intel_cpu_init(void);
+extern int cyrix_init_cpu(void);
+extern int nsc_init_cpu(void);
+extern int amd_init_cpu(void);
+extern int centaur_init_cpu(void);
+extern int transmeta_init_cpu(void);
+extern int rise_init_cpu(void);
+extern int nexgen_init_cpu(void);
+extern int umc_init_cpu(void);
+void early_cpu_detect(void);
+
+void __init early_cpu_init(void)
+{
+       early_cpu_detect();
+       intel_cpu_init();
+       cyrix_init_cpu();
+       nsc_init_cpu();
+       amd_init_cpu();
+       centaur_init_cpu();
+       transmeta_init_cpu();
+       rise_init_cpu();
+       nexgen_init_cpu();
+       umc_init_cpu();
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       /* pse is not compatible with on-the-fly unmapping,
+        * disable it even if the cpus claim to support it.
+        */
+       clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+       disable_pse = 1;
+#endif
+}
+
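+/*
+ * Descriptive note on the Xen GDT protocol: a guest cannot simply lgdt an
+ * arbitrary table. Each frame backing the GDT is first write-protected in
+ * the boot page tables, then its machine frame number is handed to
+ * HYPERVISOR_set_gdt() along with the entry count (size / 8), and
+ * lgdt_finish() completes the switch to the new GDT.
+ */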
+void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
+{
+       unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
+       unsigned long va;
+       int f;
+
+       for (va = gdt_descr->address, f = 0;
+            va < gdt_descr->address + gdt_descr->size;
+            va += PAGE_SIZE, f++) {
+               frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+               wrprotect_bootpt(swapper_pg_dir, (void *)va, 1);
+       }
+       flush_page_update_queue();
+       if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
+               BUG();
+       lgdt_finish();
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __init cpu_init (void)
+{
+       int cpu = smp_processor_id();
+       struct tss_struct * t = init_tss + cpu;
+       struct thread_struct *thread = &current->thread;
+
+       if (test_and_set_bit(cpu, &cpu_initialized)) {
+               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+               for (;;) local_irq_enable();
+       }
+       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+       if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+               clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+       if (tsc_disable && cpu_has_tsc) {
+               printk(KERN_NOTICE "Disabling TSC...\n");
+               /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+               clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+               set_in_cr4(X86_CR4_TSD);
+       }
+
+       /*
+        * Initialize the per-CPU GDT with the boot GDT,
+        * and set up the GDT descriptor:
+        */
+       if (cpu) {
+               cpu_gdt_descr[cpu].size = GDT_SIZE;
+               cpu_gdt_descr[cpu].address = 0; /* XXXcl alloc page */
+               BUG();          /* XXXcl SMP */
+               memcpy((void *)cpu_gdt_descr[cpu].address,
+                   (void *)cpu_gdt_descr[0].address, GDT_SIZE);
+       }
+       /*
+        * Set up the per-thread TLS descriptor cache:
+        */
+       memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
+           GDT_ENTRY_TLS_ENTRIES * 8);
+
+       cpu_gdt_init(&cpu_gdt_descr[cpu]);
+
+       /*
+        * Delete NT
+        */
+       __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
+
+       /*
+        * Set up and load the per-CPU TSS and LDT
+        */
+       atomic_inc(&init_mm.mm_count);
+       current->active_mm = &init_mm;
+       if (current->mm)
+               BUG();
+       enter_lazy_tlb(&init_mm, current);
+
+       load_esp0(t, thread);
+
+       load_LDT(&init_mm.context);
+       flush_page_update_queue();
+
+       /* Clear %fs and %gs. */
+       asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
+
+       /* Clear all 6 debug registers: */
+
+#define CD(register) HYPERVISOR_set_debugreg(register, 0)
+
+       CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
+
+#undef CD
+
+       /*
+        * Force FPU initialization:
+        */
+       current_thread_info()->status = 0;
+       current->used_math = 0;
+       mxcsr_feature_mask_init();
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/entry.S b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/entry.S
new file mode 100644 (file)
index 0000000..d4fd8e1
--- /dev/null
@@ -0,0 +1,1030 @@
+/*
+ *  linux/arch/i386/entry.S
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
+ * on a 486.
+ *
+ * Stack layout in 'ret_from_system_call':
+ *     ptrace needs to have all regs on the stack.
+ *     if the order here is changed, it needs to be
+ *     updated in fork.c:copy_process, signal.c:do_signal,
+ *     ptrace.c and ptrace.h
+ *
+ *      0(%esp) - %ebx
+ *      4(%esp) - %ecx
+ *      8(%esp) - %edx
+ *       C(%esp) - %esi
+ *     10(%esp) - %edi
+ *     14(%esp) - %ebp
+ *     18(%esp) - %eax
+ *     1C(%esp) - %ds
+ *     20(%esp) - %es
+ *     24(%esp) - orig_eax
+ *     28(%esp) - %eip
+ *     2C(%esp) - %cs
+ *     30(%esp) - %eflags
+ *     34(%esp) - %oldesp
+ *     38(%esp) - %oldss
+ *
+ * "current" is in register %ebx during any slow entries.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include "irq_vectors.h"
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+
+#define nr_syscalls ((syscall_table_size)/4)
+
+EBX            = 0x00
+ECX            = 0x04
+EDX            = 0x08
+ESI            = 0x0C
+EDI            = 0x10
+EBP            = 0x14
+EAX            = 0x18
+DS             = 0x1C
+ES             = 0x20
+ORIG_EAX       = 0x24
+EIP            = 0x28
+CS             = 0x2C
+EFLAGS         = 0x30
+OLDESP         = 0x34
+OLDSS          = 0x38
+
+CF_MASK                = 0x00000001
+TF_MASK                = 0x00000100
+IF_MASK                = 0x00000200
+DF_MASK                = 0x00000400 
+NT_MASK                = 0x00004000
+VM_MASK                = 0x00020000
+
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending          /* 0 */
+#define evtchn_upcall_mask             1
+
+#define XEN_BLOCK_EVENTS(reg)  movb $1,evtchn_upcall_mask(reg)
+#define XEN_UNBLOCK_EVENTS(reg)        movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
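+
+# For illustration, with the offsets above these expand to plain byte ops on
+# the shared info page: XEN_BLOCK_EVENTS(%esi) becomes "movb $1,1(%esi)",
+# XEN_UNBLOCK_EVENTS(%esi) becomes "movb $0,1(%esi)", and
+# XEN_TEST_PENDING(%esi) becomes "testb $0xFF,(%esi)" because
+# evtchn_upcall_pending is defined to nothing, i.e. offset 0.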
+
+#ifdef CONFIG_PREEMPT
+#define preempt_stop           movl HYPERVISOR_shared_info,%esi        ; \
+                               XEN_BLOCK_EVENTS(%esi)
+#else
+#define preempt_stop
+#define resume_kernel          restore_all
+#endif
+
+#define SAVE_ALL \
+       cld; \
+       pushl %es; \
+       pushl %ds; \
+       pushl %eax; \
+       pushl %ebp; \
+       pushl %edi; \
+       pushl %esi; \
+       pushl %edx; \
+       pushl %ecx; \
+       pushl %ebx; \
+       movl $(__KERNEL_DS), %edx; \
+       movl %edx, %ds; \
+       movl %edx, %es;
+       # XXXcl USER?
+
+#define RESTORE_INT_REGS \
+       popl %ebx;      \
+       popl %ecx;      \
+       popl %edx;      \
+       popl %esi;      \
+       popl %edi;      \
+       popl %ebp;      \
+       popl %eax
+
+#define RESTORE_REGS   \
+       RESTORE_INT_REGS; \
+1:     popl %ds;       \
+2:     popl %es;       \
+.section .fixup,"ax";  \
+3:     movl $0,(%esp); \
+       jmp 1b;         \
+4:     movl $0,(%esp); \
+       jmp 2b;         \
+.previous;             \
+.section __ex_table,"a";\
+       .align 4;       \
+       .long 1b,3b;    \
+       .long 2b,4b;    \
+.previous
+
+
+#define RESTORE_ALL    \
+       RESTORE_REGS    \
+       addl $4, %esp;  \
+1:     iret;           \
+.section .fixup,"ax";   \
+2:     movl $(__USER_DS), %edx; \
+       movl %edx, %ds; \
+       movl %edx, %es; \
+       pushl $11;      \
+       call do_exit;   \
+.previous;             \
+.section __ex_table,"a";\
+       .align 4;       \
+       .long 1b,2b;    \
+.previous
+
+
+
+ENTRY(lcall7)
+       pushfl                  # We get a different stack layout with call
+                               # gates, which has to be cleaned up later..
+       pushl %eax
+       SAVE_ALL
+       movl %esp, %ebp
+       pushl %ebp
+       pushl $0x7
+do_lcall:
+       movl EIP(%ebp), %eax    # due to call gates, this is eflags, not eip..
+       movl CS(%ebp), %edx     # this is eip..
+       movl EFLAGS(%ebp), %ecx # and this is cs..
+       movl %eax,EFLAGS(%ebp)  #
+       movl %edx,EIP(%ebp)     # Now we move them to their "normal" places
+       movl %ecx,CS(%ebp)      #
+       GET_THREAD_INFO_WITH_ESP(%ebp)  # GET_THREAD_INFO
+       movl TI_exec_domain(%ebp), %edx # Get the execution domain
+       call *EXEC_DOMAIN_handler(%edx) # Call the handler for the domain
+       addl $4, %esp
+       popl %eax
+       jmp resume_userspace
+
+ENTRY(lcall27)
+       pushfl                  # We get a different stack layout with call
+                               # gates, which has to be cleaned up later..
+       pushl %eax
+       SAVE_ALL
+       movl %esp, %ebp
+       pushl %ebp
+       pushl $0x27
+       jmp do_lcall
+
+
+ENTRY(ret_from_fork)
+       pushl %eax
+       call schedule_tail
+       GET_THREAD_INFO(%ebp)
+       popl %eax
+       jmp syscall_exit
+
+/*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+
+       # userspace resumption stub bypassing syscall exit tracing
+       ALIGN
+ret_from_exception:
+       preempt_stop
+ret_from_intr:
+       GET_THREAD_INFO(%ebp)
+       movl EFLAGS(%esp), %eax         # mix EFLAGS and CS
+       movb CS(%esp), %al
+       testl $(VM_MASK | 2), %eax
+       jz resume_kernel                # returning to kernel or vm86-space
+ENTRY(resume_userspace)
+       movl HYPERVISOR_shared_info,%esi
+       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+                                       # make sure we don't miss an interrupt
+                                       # setting need_resched or sigpending
+                                       # between sampling and the iret
+ret_syscall_tests:
+       movl TI_flags(%ebp), %ecx
+       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
+                                       # int/exception return?
+       jne work_pending
+       jmp restore_all_enable_events
+
+#ifdef CONFIG_PREEMPT
+ENTRY(resume_kernel)
+       movl HYPERVISOR_shared_info,%esi
+       cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
+       jnz restore_all_enable_events
+need_resched:
+       movl TI_flags(%ebp), %ecx       # need_resched set ?
+       testb $_TIF_NEED_RESCHED, %cl
+       jz restore_all_enable_events
+       testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
+       jz restore_all_enable_events
+       movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
+       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+       call schedule
+       movl $0,TI_preempt_count(%ebp)
+       movl HYPERVISOR_shared_info,%esi
+       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+       jmp need_resched
+#endif
+
+/* SYSENTER_RETURN points to after the "sysenter" instruction in
+   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */
+
+       # sysenter call handler stub
+ENTRY(sysenter_entry)
+       movl TSS_sysenter_esp0(%esp),%esp
+sysenter_past_esp:
+       sti
+       pushl $(__USER_DS)
+       pushl %ebp
+       pushfl
+       pushl $(__USER_CS)
+       pushl $SYSENTER_RETURN
+
+/*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
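+/* Why __PAGE_OFFSET-3: %ebp must be at most __PAGE_OFFSET-4 so that all
+ * four bytes of the "movl (%ebp),%ebp" load below lie in user space; if
+ * the load faults anyway, the __ex_table entry redirects execution to
+ * syscall_fault. */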
+       cmpl $__PAGE_OFFSET-3,%ebp
+       jae syscall_fault
+1:     movl (%ebp),%ebp
+.section __ex_table,"a"
+       .align 4
+       .long 1b,syscall_fault
+.previous
+
+       pushl %eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       cmpl $(nr_syscalls), %eax
+       jae syscall_badsys
+
+       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+       jnz syscall_trace_entry
+       call *sys_call_table(,%eax,4)
+       movl %eax,EAX(%esp)
+       cli
+       movl TI_flags(%ebp), %ecx
+       testw $_TIF_ALLWORK_MASK, %cx
+       jne syscall_exit_work
+/* if something modifies registers it must also disable sysexit */
+       movl EIP(%esp), %edx
+       movl OLDESP(%esp), %ecx
+       sti
+       sysexit
+
+
+       # system call handler stub
+ENTRY(system_call)
+       pushl %eax                      # save orig_eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       cmpl $(nr_syscalls), %eax
+       jae syscall_badsys
+                                       # system call tracing in operation
+       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+       jnz syscall_trace_entry
+syscall_call:
+       call *sys_call_table(,%eax,4)
+       movl %eax,EAX(%esp)             # store the return value
+syscall_exit:
+       movl HYPERVISOR_shared_info,%esi
+       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+                                       # make sure we don't miss an interrupt
+                                       # setting need_resched or sigpending
+                                       # between sampling and the iret
+       movl TI_flags(%ebp), %ecx
+       testw $_TIF_ALLWORK_MASK, %cx   # current->work
+       jne syscall_exit_work
+       jmp restore_all_enable_events
+
+       ALIGN
+restore_all:
+       RESTORE_ALL
+
+       # perform work that needs to be done immediately before resumption
+       ALIGN
+work_pending:
+       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+       testb $_TIF_NEED_RESCHED, %cl
+       jz work_notifysig
+work_resched:
+       call schedule
+       movl HYPERVISOR_shared_info,%esi
+       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+                                       # make sure we don't miss an interrupt
+                                       # setting need_resched or sigpending
+                                       # between sampling and the iret
+       movl TI_flags(%ebp), %ecx
+       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
+                                       # than syscall tracing?
+       jz restore_all_enable_events
+       # XXXcl sti missing???
+       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+       testb $_TIF_NEED_RESCHED, %cl
+       jnz work_resched
+
+work_notifysig:                                # deal with pending signals and
+                                       # notify-resume requests
+       testl $VM_MASK, EFLAGS(%esp)
+       movl %esp, %eax
+       jne work_notifysig_v86          # returning to kernel-space or
+                                       # vm86-space
+       xorl %edx, %edx
+       call do_notify_resume
+       movl HYPERVISOR_shared_info,%esi
+       jmp restore_all_enable_events
+
+       ALIGN
+work_notifysig_v86:
+       pushl %ecx
+       call save_v86_state
+       popl %ecx
+       movl %eax, %esp
+       xorl %edx, %edx
+       call do_notify_resume
+       movl HYPERVISOR_shared_info,%esi
+       jmp restore_all_enable_events
+
+       # perform syscall exit tracing
+       ALIGN
+syscall_trace_entry:
+       movl $-ENOSYS,EAX(%esp)
+       movl %esp, %eax
+       xorl %edx,%edx
+       call do_syscall_trace
+       movl ORIG_EAX(%esp), %eax
+       cmpl $(nr_syscalls), %eax
+       jnae syscall_call
+       jmp syscall_exit
+
+       # perform syscall exit tracing
+       ALIGN
+syscall_exit_work:
+       movl HYPERVISOR_shared_info,%esi
+       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl
+       jz work_pending
+       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+                                       # could let do_syscall_trace() call
+                                       # schedule() instead
+       movl %esp, %eax
+       movl $1, %edx
+       call do_syscall_trace
+       jmp resume_userspace
+
+       ALIGN
+syscall_fault:
+       pushl %eax                      # save orig_eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       movl $-EFAULT,EAX(%esp)
+       jmp resume_userspace
+
+       ALIGN
+syscall_badsys:
+       movl $-ENOSYS,EAX(%esp)
+       jmp resume_userspace
+
+ENTRY(divide_error)
+       pushl $0                        # no error code
+       pushl $do_divide_error
+       ALIGN
+error_code:
+       pushl %ds
+       pushl %eax
+       xorl %eax, %eax
+       pushl %ebp
+       pushl %edi
+       pushl %esi
+       pushl %edx
+       decl %eax                       # eax = -1
+       pushl %ecx
+       pushl %ebx
+       cld
+       movl %es, %ecx
+       movl ORIG_EAX(%esp), %esi       # get the error code
+       movl ES(%esp), %edi             # get the function address
+       movl %eax, ORIG_EAX(%esp)
+       movl %ecx, ES(%esp)
+       movl %esp, %edx
+       pushl %esi                      # push the error code
+       pushl %edx                      # push the pt_regs pointer
+       movl $(__KERNEL_DS), %edx       # XXXcl USER?
+       movl %edx, %ds
+       movl %edx, %es
+       call *%edi
+       addl $8, %esp
+       jmp ret_from_exception
+
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+ENTRY(hypervisor_callback)
+       pushl %eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       movl EIP(%esp),%eax
+       cmpl $scrit,%eax
+       jb   11f
+       cmpl $ecrit,%eax
+       jb   critical_region_fixup
+11:    push %esp
+       call evtchn_do_upcall
+       add  $4,%esp
+       movl HYPERVISOR_shared_info,%esi
+       movb CS(%esp),%cl
+       test $2,%cl                     # slow return to ring 2 or 3
+       jne  ret_syscall_tests
+restore_all_enable_events:  
+safesti:XEN_UNBLOCK_EVENTS(%esi)       # reenable event callbacks
+scrit: /**** START OF CRITICAL REGION ****/
+       testb $1,evtchn_upcall_pending(%esi)
+       jnz  14f                        # process more events if necessary...
+       RESTORE_ALL
+14:    XEN_BLOCK_EVENTS(%esi)
+       jmp  11b
+ecrit:  /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame. 
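+#
+# Worked example: if the upcall arrives at the "pop %esi" of RESTORE_ALL,
+# the table byte is 0x0c -- %ebx, %ecx and %edx (12 bytes) had already been
+# popped from the interrupted frame. The code below then copies those 12
+# bytes from the new frame into the gap and points %esp at the merged frame.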
+critical_region_fixup:
+       addl $critical_fixup_table-scrit,%eax
+       movzbl (%eax),%eax              # %eax contains num bytes popped
+       mov  %esp,%esi
+       add  %eax,%esi                  # %esi points at end of src region
+       mov  %esp,%edi
+       add  $0x34,%edi                 # %edi points at end of dst region
+       mov  %eax,%ecx
+       shr  $2,%ecx                    # convert byte count to words
+       je   16f                        # skip loop if nothing to copy
+15:    subl $4,%esi                    # pre-decrementing copy loop
+       subl $4,%edi
+       movl (%esi),%eax
+       movl %eax,(%edi)
+       loop 15b
+16:    movl %edi,%esp                  # final %edi is top of merged stack
+       jmp  11b
+
+critical_fixup_table:
+       .byte 0x00,0x00,0x00            # testb $1,evtchn_upcall_pending(%esi)
+       .byte 0x00,0x00                 # jnz  14f
+       .byte 0x00                      # pop  %ebx
+       .byte 0x04                      # pop  %ecx
+       .byte 0x08                      # pop  %edx
+       .byte 0x0c                      # pop  %esi
+       .byte 0x10                      # pop  %edi
+       .byte 0x14                      # pop  %ebp
+       .byte 0x18                      # pop  %eax
+       .byte 0x1c                      # pop  %ds
+       .byte 0x20                      # pop  %es
+       .byte 0x24,0x24,0x24            # add  $4,%esp
+       .byte 0x28                      # iret
+       .byte 0x00,0x00,0x00,0x00       # movb $1,1(%esi)
+       .byte 0x00,0x00                 # jmp  11b
+
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+       pushal
+       call install_safe_pf_handler
+       movl 32(%esp),%ebx
+1:     movl %ebx,%ds
+       movl 36(%esp),%ebx
+2:     movl %ebx,%es
+       movl 40(%esp),%ebx
+3:     movl %ebx,%fs
+       movl 44(%esp),%ebx
+4:     movl %ebx,%gs
+       call install_normal_pf_handler
+       popal
+       addl $16,%esp
+5:     iret
+.section .fixup,"ax";  \
+6:     xorl %ebx,%ebx; \
+       jmp 1b;         \
+7:     xorl %ebx,%ebx; \
+       jmp 2b;         \
+8:     xorl %ebx,%ebx; \
+       jmp 3b;         \
+9:     xorl %ebx,%ebx; \
+       jmp 4b;         \
+10:    pushl %ss;      \
+       popl %ds;       \
+       pushl %ss;      \
+       popl %es;       \
+       pushl $11;      \
+       call do_exit;   \
+.previous;             \
+.section __ex_table,"a";\
+       .align 4;       \
+       .long 1b,6b;    \
+       .long 2b,7b;    \
+       .long 3b,8b;    \
+       .long 4b,9b;    \
+       .long 5b,10b;   \
+.previous
+
+ENTRY(coprocessor_error)
+       pushl $0
+       pushl $do_coprocessor_error
+       jmp error_code
+
+ENTRY(simd_coprocessor_error)
+       pushl $0
+       pushl $do_simd_coprocessor_error
+       jmp error_code
+
+ENTRY(device_not_available)
+       pushl $-1                       # mark this as an int
+       SAVE_ALL
+       preempt_stop
+       call math_state_restore
+       jmp ret_from_exception
+
+/*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+ * We just load the right stack, and push the three (known) values
+ * by hand onto the new stack - while updating the return eip past
+ * the instruction that would have done it for sysenter.
+ */
+#define FIX_STACK(offset, ok, label)           \
+       cmpw $__KERNEL_CS,4(%esp);              \
+       jne ok;                                 \
+label:                                         \
+       movl TSS_sysenter_esp0+offset(%esp),%esp;       \
+       pushfl;                                 \
+       pushl $__KERNEL_CS;                     \
+       pushl $sysenter_past_esp
+
+ENTRY(debug)
+       cmpl $sysenter_entry,(%esp)
+       jne debug_stack_correct
+       FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+debug_stack_correct:
+       pushl $0
+       pushl $do_debug
+       jmp error_code
+
+#if 0
+/*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+ * a debug fault, and the debug fault hasn't yet been able to
+ * clear up the stack. So we first check whether we got  an
+ * NMI on the sysenter entry path, but after that we need to
+ * check whether we got an NMI on the debug path where the debug
+ * fault happened on the sysenter path.
+ */
+ENTRY(nmi)
+       cmpl $sysenter_entry,(%esp)
+       je nmi_stack_fixup
+       pushl %eax
+       movl %esp,%eax
+       /* Do not access memory above the end of our stack page,
+        * it might not exist.
+        */
+       andl $(THREAD_SIZE-1),%eax
+       cmpl $(THREAD_SIZE-20),%eax
+       popl %eax
+       jae nmi_stack_correct
+       cmpl $sysenter_entry,12(%esp)
+       je nmi_debug_stack_check
+nmi_stack_correct:
+       pushl %eax
+       SAVE_ALL
+       movl %esp, %edx
+       pushl $0
+       pushl %edx
+       call do_nmi
+       addl $8, %esp
+       RESTORE_ALL
+
+nmi_stack_fixup:
+       FIX_STACK(12,nmi_stack_correct, 1)
+       jmp nmi_stack_correct
+nmi_debug_stack_check:
+       cmpw $__KERNEL_CS,16(%esp)
+       jne nmi_stack_correct
+       cmpl $debug - 1,(%esp)
+       jle nmi_stack_correct
+       cmpl $debug_esp_fix_insn,(%esp)
+       jle nmi_debug_stack_fixup
+nmi_debug_stack_fixup:
+       FIX_STACK(24,nmi_stack_correct, 1)
+       jmp nmi_stack_correct
+#endif
+
+ENTRY(int3)
+       pushl $0
+       pushl $do_int3
+       jmp error_code
+
+ENTRY(overflow)
+       pushl $0
+       pushl $do_overflow
+       jmp error_code
+
+ENTRY(bounds)
+       pushl $0
+       pushl $do_bounds
+       jmp error_code
+
+ENTRY(invalid_op)
+       pushl $0
+       pushl $do_invalid_op
+       jmp error_code
+
+ENTRY(coprocessor_segment_overrun)
+       pushl $0
+       pushl $do_coprocessor_segment_overrun
+       jmp error_code
+
+ENTRY(double_fault)
+       pushl $do_double_fault
+       jmp error_code
+
+ENTRY(invalid_TSS)
+       pushl $do_invalid_TSS
+       jmp error_code
+
+ENTRY(segment_not_present)
+       pushl $do_segment_not_present
+       jmp error_code
+
+ENTRY(stack_segment)
+       pushl $do_stack_segment
+       jmp error_code
+
+ENTRY(general_protection)
+       pushl $do_general_protection
+       jmp error_code
+
+ENTRY(alignment_check)
+       pushl $do_alignment_check
+       jmp error_code
+
+# This handler is special, because it gets an extra value on its stack,
+# which is the linear faulting address.
+#define PAGE_FAULT_STUB(_name1, _name2)                                          \
+ENTRY(_name1)                                                            \
+       pushl %ds                                                       ; \
+       pushl %eax                                                      ; \
+       xorl %eax,%eax                                                  ; \
+       pushl %ebp                                                      ; \
+       pushl %edi                                                      ; \
+       pushl %esi                                                      ; \
+       pushl %edx                                                      ; \
+       decl %eax                       /* eax = -1 */                  ; \
+       pushl %ecx                                                      ; \
+       pushl %ebx                                                      ; \
+       GET_THREAD_INFO(%ebp)                                           ; \
+       cld                                                             ; \
+       movl %es,%ecx                                                   ; \
+       movl ORIG_EAX(%esp), %esi       /* get the error code */        ; \
+       movl ES(%esp), %edi             /* get the faulting address */  ; \
+       movl %eax, ORIG_EAX(%esp)                                       ; \
+       movl %ecx, ES(%esp)                                             ; \
+       movl %esp,%edx                                                  ; \
+       pushl %edi                      /* push the faulting address */ ; \
+       pushl %esi                      /* push the error code */       ; \
+       pushl %edx                      /* push the pt_regs pointer */  ; \
+       movl $(__KERNEL_DS),%edx                                        ; \
+       movl %edx,%ds                                                   ; \
+       movl %edx,%es                                                   ; \
+       call _name2                                                     ; \
+       addl $12,%esp                                                   ; \
+       jmp ret_from_exception                                          ;
+PAGE_FAULT_STUB(page_fault, do_page_fault)
+PAGE_FAULT_STUB(safe_page_fault, do_safe_page_fault)
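+
+# With the push order above (address, then error code, then the pt_regs
+# pointer last), the C handlers are expected to have the signature
+#   void _name2(struct pt_regs *regs, unsigned long error_code,
+#               unsigned long address);
+# -- a sketch of the calling convention, inferred from this stub.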
+
+#ifdef CONFIG_X86_MCE
+ENTRY(machine_check)
+       pushl $0
+       pushl machine_check_vector
+       jmp error_code
+#endif
+
+ENTRY(spurious_interrupt_bug)
+       pushl $0
+       pushl $do_spurious_interrupt_bug
+       jmp error_code
+
+.data
+ENTRY(sys_call_table)
+       .long sys_restart_syscall       /* 0 - old "setup()" system call, used for restarting */
+       .long sys_exit
+       .long sys_fork
+       .long sys_read
+       .long sys_write
+       .long sys_open          /* 5 */
+       .long sys_close
+       .long sys_waitpid
+       .long sys_creat
+       .long sys_link
+       .long sys_unlink        /* 10 */
+       .long sys_execve
+       .long sys_chdir
+       .long sys_time
+       .long sys_mknod
+       .long sys_chmod         /* 15 */
+       .long sys_lchown16
+       .long sys_ni_syscall    /* old break syscall holder */
+       .long sys_stat
+       .long sys_lseek
+       .long sys_getpid        /* 20 */
+       .long sys_mount
+       .long sys_oldumount
+       .long sys_setuid16
+       .long sys_getuid16
+       .long sys_stime         /* 25 */
+       .long sys_ptrace
+       .long sys_alarm
+       .long sys_fstat
+       .long sys_pause
+       .long sys_utime         /* 30 */
+       .long sys_ni_syscall    /* old stty syscall holder */
+       .long sys_ni_syscall    /* old gtty syscall holder */
+       .long sys_access
+       .long sys_nice
+       .long sys_ni_syscall    /* 35 - old ftime syscall holder */
+       .long sys_sync
+       .long sys_kill
+       .long sys_rename
+       .long sys_mkdir
+       .long sys_rmdir         /* 40 */
+       .long sys_dup
+       .long sys_pipe
+       .long sys_times
+       .long sys_ni_syscall    /* old prof syscall holder */
+       .long sys_brk           /* 45 */
+       .long sys_setgid16
+       .long sys_getgid16
+       .long sys_signal
+       .long sys_geteuid16
+       .long sys_getegid16     /* 50 */
+       .long sys_acct
+       .long sys_umount        /* recycled never used phys() */
+       .long sys_ni_syscall    /* old lock syscall holder */
+       .long sys_ioctl
+       .long sys_fcntl         /* 55 */
+       .long sys_ni_syscall    /* old mpx syscall holder */
+       .long sys_setpgid
+       .long sys_ni_syscall    /* old ulimit syscall holder */
+       .long sys_olduname
+       .long sys_umask         /* 60 */
+       .long sys_chroot
+       .long sys_ustat
+       .long sys_dup2
+       .long sys_getppid
+       .long sys_getpgrp       /* 65 */
+       .long sys_setsid
+       .long sys_sigaction
+       .long sys_sgetmask
+       .long sys_ssetmask
+       .long sys_setreuid16    /* 70 */
+       .long sys_setregid16
+       .long sys_sigsuspend
+       .long sys_sigpending
+       .long sys_sethostname
+       .long sys_setrlimit     /* 75 */
+       .long sys_old_getrlimit
+       .long sys_getrusage
+       .long sys_gettimeofday
+       .long sys_settimeofday
+       .long sys_getgroups16   /* 80 */
+       .long sys_setgroups16
+       .long old_select
+       .long sys_symlink
+       .long sys_lstat
+       .long sys_readlink      /* 85 */
+       .long sys_uselib
+       .long sys_swapon
+       .long sys_reboot
+       .long old_readdir
+       .long old_mmap          /* 90 */
+       .long sys_munmap
+       .long sys_truncate
+       .long sys_ftruncate
+       .long sys_fchmod
+       .long sys_fchown16      /* 95 */
+       .long sys_getpriority
+       .long sys_setpriority
+       .long sys_ni_syscall    /* old profil syscall holder */
+       .long sys_statfs
+       .long sys_fstatfs       /* 100 */
+       .long sys_ioperm
+       .long sys_socketcall
+       .long sys_syslog
+       .long sys_setitimer
+       .long sys_getitimer     /* 105 */
+       .long sys_newstat
+       .long sys_newlstat
+       .long sys_newfstat
+       .long sys_uname
+       .long sys_iopl          /* 110 */
+       .long sys_vhangup
+       .long sys_ni_syscall    /* old "idle" system call */
+       .long sys_vm86old
+       .long sys_wait4
+       .long sys_swapoff       /* 115 */
+       .long sys_sysinfo
+       .long sys_ipc
+       .long sys_fsync
+       .long sys_sigreturn
+       .long sys_clone         /* 120 */
+       .long sys_setdomainname
+       .long sys_newuname
+       .long sys_modify_ldt
+       .long sys_adjtimex
+       .long sys_mprotect      /* 125 */
+       .long sys_sigprocmask
+       .long sys_ni_syscall    /* old "create_module" */ 
+       .long sys_init_module
+       .long sys_delete_module
+       .long sys_ni_syscall    /* 130: old "get_kernel_syms" */
+       .long sys_quotactl
+       .long sys_getpgid
+       .long sys_fchdir
+       .long sys_bdflush
+       .long sys_sysfs         /* 135 */
+       .long sys_personality
+       .long sys_ni_syscall    /* reserved for afs_syscall */
+       .long sys_setfsuid16
+       .long sys_setfsgid16
+       .long sys_llseek        /* 140 */
+       .long sys_getdents
+       .long sys_select
+       .long sys_flock
+       .long sys_msync
+       .long sys_readv         /* 145 */
+       .long sys_writev
+       .long sys_getsid
+       .long sys_fdatasync
+       .long sys_sysctl
+       .long sys_mlock         /* 150 */
+       .long sys_munlock
+       .long sys_mlockall
+       .long sys_munlockall
+       .long sys_sched_setparam
+       .long sys_sched_getparam   /* 155 */
+       .long sys_sched_setscheduler
+       .long sys_sched_getscheduler
+       .long sys_sched_yield
+       .long sys_sched_get_priority_max
+       .long sys_sched_get_priority_min  /* 160 */
+       .long sys_sched_rr_get_interval
+       .long sys_nanosleep
+       .long sys_mremap
+       .long sys_setresuid16
+       .long sys_getresuid16   /* 165 */
+       .long sys_vm86
+       .long sys_ni_syscall    /* Old sys_query_module */
+       .long sys_poll
+       .long sys_nfsservctl
+       .long sys_setresgid16   /* 170 */
+       .long sys_getresgid16
+       .long sys_prctl
+       .long sys_rt_sigreturn
+       .long sys_rt_sigaction
+       .long sys_rt_sigprocmask        /* 175 */
+       .long sys_rt_sigpending
+       .long sys_rt_sigtimedwait
+       .long sys_rt_sigqueueinfo
+       .long sys_rt_sigsuspend
+       .long sys_pread64       /* 180 */
+       .long sys_pwrite64
+       .long sys_chown16
+       .long sys_getcwd
+       .long sys_capget
+       .long sys_capset        /* 185 */
+       .long sys_sigaltstack
+       .long sys_sendfile
+       .long sys_ni_syscall    /* reserved for streams1 */
+       .long sys_ni_syscall    /* reserved for streams2 */
+       .long sys_vfork         /* 190 */
+       .long sys_getrlimit
+       .long sys_mmap2
+       .long sys_truncate64
+       .long sys_ftruncate64
+       .long sys_stat64        /* 195 */
+       .long sys_lstat64
+       .long sys_fstat64
+       .long sys_lchown
+       .long sys_getuid
+       .long sys_getgid        /* 200 */
+       .long sys_geteuid
+       .long sys_getegid
+       .long sys_setreuid
+       .long sys_setregid
+       .long sys_getgroups     /* 205 */
+       .long sys_setgroups
+       .long sys_fchown
+       .long sys_setresuid
+       .long sys_getresuid
+       .long sys_setresgid     /* 210 */
+       .long sys_getresgid
+       .long sys_chown
+       .long sys_setuid
+       .long sys_setgid
+       .long sys_setfsuid      /* 215 */
+       .long sys_setfsgid
+       .long sys_pivot_root
+       .long sys_mincore
+       .long sys_madvise
+       .long sys_getdents64    /* 220 */
+       .long sys_fcntl64
+       .long sys_ni_syscall    /* reserved for TUX */
+       .long sys_ni_syscall
+       .long sys_gettid
+       .long sys_readahead     /* 225 */
+       .long sys_setxattr
+       .long sys_lsetxattr
+       .long sys_fsetxattr
+       .long sys_getxattr
+       .long sys_lgetxattr     /* 230 */
+       .long sys_fgetxattr
+       .long sys_listxattr
+       .long sys_llistxattr
+       .long sys_flistxattr
+       .long sys_removexattr   /* 235 */
+       .long sys_lremovexattr
+       .long sys_fremovexattr
+       .long sys_tkill
+       .long sys_sendfile64
+       .long sys_futex         /* 240 */
+       .long sys_sched_setaffinity
+       .long sys_sched_getaffinity
+       .long sys_set_thread_area
+       .long sys_get_thread_area
+       .long sys_io_setup      /* 245 */
+       .long sys_io_destroy
+       .long sys_io_getevents
+       .long sys_io_submit
+       .long sys_io_cancel
+       .long sys_fadvise64     /* 250 */
+       .long sys_ni_syscall
+       .long sys_exit_group
+       .long sys_lookup_dcookie
+       .long sys_epoll_create
+       .long sys_epoll_ctl     /* 255 */
+       .long sys_epoll_wait
+       .long sys_remap_file_pages
+       .long sys_set_tid_address
+       .long sys_timer_create
+       .long sys_timer_settime         /* 260 */
+       .long sys_timer_gettime
+       .long sys_timer_getoverrun
+       .long sys_timer_delete
+       .long sys_clock_settime
+       .long sys_clock_gettime         /* 265 */
+       .long sys_clock_getres
+       .long sys_clock_nanosleep
+       .long sys_statfs64
+       .long sys_fstatfs64     
+       .long sys_tgkill        /* 270 */
+       .long sys_utimes
+       .long sys_fadvise64_64
+       .long sys_ni_syscall    /* sys_vserver */
+       .long sys_mbind
+       .long sys_get_mempolicy
+       .long sys_set_mempolicy
+       .long sys_mq_open
+       .long sys_mq_unlink
+       .long sys_mq_timedsend
+       .long sys_mq_timedreceive       /* 280 */
+       .long sys_mq_notify
+       .long sys_mq_getsetattr
+       .long sys_ni_syscall            /* reserved for kexec */
+
+syscall_table_size=(.-sys_call_table)
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/evtchn.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/evtchn.c
new file mode 100644 (file)
index 0000000..d18e6ac
--- /dev/null
@@ -0,0 +1,479 @@
+/******************************************************************************
+ * evtchn.c
+ * 
+ * Communication via Xen event channels.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ */
+
+#include <linux/config.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
+#include <asm/hypervisor.h>
+#include <asm/hypervisor-ifs/event_channel.h>
+#include <asm/hypervisor-ifs/physdev.h>
+#include <asm-xen/ctrl_if.h>
+
+/*
+ * This lock protects updates to the following mapping and reference-count
+ * arrays. The lock does not need to be acquired to read the mapping tables.
+ */
+static spinlock_t irq_mapping_update_lock;
+
+/* IRQ <-> event-channel mappings. */
+static int evtchn_to_irq[NR_EVENT_CHANNELS];
+static int irq_to_evtchn[NR_IRQS];
+
+/* IRQ <-> VIRQ mapping. */
+static int virq_to_irq[NR_VIRQS];
+
+/* Reference counts for bindings to IRQs. */
+static int irq_bindcount[NR_IRQS];
+
+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
+
+/* Upcall to generic IRQ layer. */
+extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);
+
+#define VALID_EVTCHN(_chn) ((_chn) != -1)
+
+void evtchn_do_upcall(struct pt_regs *regs)
+{
+    unsigned long  l1, l2;
+    unsigned int   l1i, l2i, port;
+    int            irq;
+    unsigned long  flags;
+    shared_info_t *s = HYPERVISOR_shared_info;
+
+    local_irq_save(flags);
+    
+    while ( s->vcpu_data[0].evtchn_upcall_pending )
+    {
+        s->vcpu_data[0].evtchn_upcall_pending = 0;
+        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
+        l1 = xchg(&s->evtchn_pending_sel, 0);
+        while ( (l1i = ffs(l1)) != 0 )
+        {
+            l1i--;
+            l1 &= ~(1 << l1i);
+        
+            l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
+            while ( (l2i = ffs(l2)) != 0 )
+            {
+                l2i--;
+                l2 &= ~(1 << l2i);
+            
+                port = (l1i << 5) + l2i;
+                if ( (irq = evtchn_to_irq[port]) != -1 )
+                    do_IRQ(irq, regs);
+                else
+                    evtchn_device_upcall(port);
+            }
+        }
+    }
+
+    local_irq_restore(flags);
+}
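+
+/*
+ * Worked example of the scan above (illustration only): if bit 3 of
+ * evtchn_pending_sel was set, and bit 7 of evtchn_pending[3] is pending
+ * and unmasked, then l1i = 3, l2i = 7 and the port is
+ * (3 << 5) + 7 = 103, which is dispatched via evtchn_to_irq[103] if a
+ * binding exists, or to evtchn_device_upcall() otherwise.
+ */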
+
+
+static int find_unbound_irq(void)
+{
+    int irq;
+
+    for ( irq = 0; irq < NR_IRQS; irq++ )
+        if ( irq_bindcount[irq] == 0 )
+            break;
+
+    if ( irq == NR_IRQS )
+        panic("No available IRQ to bind to: increase NR_IRQS!\n");
+
+    return irq;
+}
+
+int bind_virq_to_irq(int virq)
+{
+    evtchn_op_t op;
+    int evtchn, irq;
+
+    spin_lock(&irq_mapping_update_lock);
+
+    if ( (irq = virq_to_irq[virq]) == -1 )
+    {
+        op.cmd              = EVTCHNOP_bind_virq;
+        op.u.bind_virq.virq = virq;
+        if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            panic("Failed to bind virtual IRQ %d\n", virq);
+        evtchn = op.u.bind_virq.port;
+
+        irq = find_unbound_irq();
+        evtchn_to_irq[evtchn] = irq;
+        irq_to_evtchn[irq]    = evtchn;
+
+        virq_to_irq[virq] = irq;
+    }
+
+    irq_bindcount[irq]++;
+
+    spin_unlock(&irq_mapping_update_lock);
+    
+    return irq;
+}
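+
+/*
+ * Usage sketch (illustrative only; see init_IRQ() below, which binds
+ * VIRQ_MISDIRECT exactly this way):
+ *
+ *     int irq = bind_virq_to_irq(VIRQ_TIMER);
+ *     setup_irq(irq, &timer_action);    (timer_action is hypothetical)
+ */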
+
+void unbind_virq_from_irq(int virq)
+{
+    evtchn_op_t op;
+    int irq    = virq_to_irq[virq];
+    int evtchn = irq_to_evtchn[irq];
+
+    spin_lock(&irq_mapping_update_lock);
+
+    if ( --irq_bindcount[irq] == 0 )
+    {
+        op.cmd          = EVTCHNOP_close;
+        op.u.close.dom  = DOMID_SELF;
+        op.u.close.port = evtchn;
+        if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            panic("Failed to unbind virtual IRQ %d\n", virq);
+
+        evtchn_to_irq[evtchn] = -1;
+        irq_to_evtchn[irq]    = -1;
+        virq_to_irq[virq]     = -1;
+    }
+
+    spin_unlock(&irq_mapping_update_lock);
+}
+
+int bind_evtchn_to_irq(int evtchn)
+{
+    int irq;
+
+    spin_lock(&irq_mapping_update_lock);
+
+    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
+    {
+        irq = find_unbound_irq();
+        evtchn_to_irq[evtchn] = irq;
+        irq_to_evtchn[irq]    = evtchn;
+    }
+
+    irq_bindcount[irq]++;
+
+    spin_unlock(&irq_mapping_update_lock);
+    
+    return irq;
+}
+
+void unbind_evtchn_from_irq(int evtchn)
+{
+    int irq = evtchn_to_irq[evtchn];
+
+    spin_lock(&irq_mapping_update_lock);
+
+    if ( --irq_bindcount[irq] == 0 )
+    {
+        evtchn_to_irq[evtchn] = -1;
+        irq_to_evtchn[irq]    = -1;
+    }
+
+    spin_unlock(&irq_mapping_update_lock);
+}
+
+
+/*
+ * Interface to generic handling in irq.c
+ */
+
+static unsigned int startup_dynirq(unsigned int irq)
+{
+    unmask_evtchn(irq_to_evtchn[irq]);
+    return 0;
+}
+
+static void shutdown_dynirq(unsigned int irq)
+{
+    mask_evtchn(irq_to_evtchn[irq]);
+}
+
+static void enable_dynirq(unsigned int irq)
+{
+    unmask_evtchn(irq_to_evtchn[irq]);
+}
+
+static void disable_dynirq(unsigned int irq)
+{
+    mask_evtchn(irq_to_evtchn[irq]);
+}
+
+static void ack_dynirq(unsigned int irq)
+{
+    mask_evtchn(irq_to_evtchn[irq]);
+    clear_evtchn(irq_to_evtchn[irq]);
+}
+
+static void end_dynirq(unsigned int irq)
+{
+    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
+        unmask_evtchn(irq_to_evtchn[irq]);
+}
+
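+/*
+ * Note: positional initializer. The 2.6 hw_interrupt_type fields are,
+ * in order: typename, startup, shutdown, enable, disable, ack, end,
+ * set_affinity (left NULL here).
+ */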
+static struct hw_interrupt_type dynirq_type = {
+    "Dynamic-irq",
+    startup_dynirq,
+    shutdown_dynirq,
+    enable_dynirq,
+    disable_dynirq,
+    ack_dynirq,
+    end_dynirq,
+    NULL
+};
+
+static inline void pirq_unmask_notify(int pirq)
+{
+    physdev_op_t op;
+    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
+    {
+        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
+        (void)HYPERVISOR_physdev_op(&op);
+    }
+}
+
+static inline void pirq_query_unmask(int pirq)
+{
+    physdev_op_t op;
+    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
+    op.u.irq_status_query.irq = pirq;
+    (void)HYPERVISOR_physdev_op(&op);
+    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
+    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
+        set_bit(pirq, &pirq_needs_unmask_notify[0]);
+}
+
+/*
+ * On startup, if there is no action associated with the IRQ then we are
+ * probing. In this case we should not share with others as it will confuse us.
+ */
+#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
+
+static unsigned int startup_pirq(unsigned int irq)
+{
+    evtchn_op_t op;
+    int evtchn;
+
+    op.cmd               = EVTCHNOP_bind_pirq;
+    op.u.bind_pirq.pirq  = irq;
+    /* NB. We are happy to share unless we are probing. */
+    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+    if ( HYPERVISOR_event_channel_op(&op) != 0 )
+    {
+        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
+            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
+        return 0;
+    }
+    evtchn = op.u.bind_pirq.port;
+
+    pirq_query_unmask(irq_to_pirq(irq));
+
+    evtchn_to_irq[evtchn] = irq;
+    irq_to_evtchn[irq]    = evtchn;
+
+    unmask_evtchn(evtchn);
+    pirq_unmask_notify(irq_to_pirq(irq));
+
+    return 0;
+}
+
+static void shutdown_pirq(unsigned int irq)
+{
+    evtchn_op_t op;
+    int evtchn = irq_to_evtchn[irq];
+
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+
+    mask_evtchn(evtchn);
+
+    op.cmd          = EVTCHNOP_close;
+    op.u.close.dom  = DOMID_SELF;
+    op.u.close.port = evtchn;
+    if ( HYPERVISOR_event_channel_op(&op) != 0 )
+        panic("Failed to unbind physical IRQ %d\n", irq);
+
+    evtchn_to_irq[evtchn] = -1;
+    irq_to_evtchn[irq]    = -1;
+}
+
+static void enable_pirq(unsigned int irq)
+{
+    int evtchn = irq_to_evtchn[irq];
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+    unmask_evtchn(evtchn);
+    pirq_unmask_notify(irq_to_pirq(irq));
+}
+
+static void disable_pirq(unsigned int irq)
+{
+    int evtchn = irq_to_evtchn[irq];
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+    mask_evtchn(evtchn);
+}
+
+static void ack_pirq(unsigned int irq)
+{
+    int evtchn = irq_to_evtchn[irq];
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+    mask_evtchn(evtchn);
+    clear_evtchn(evtchn);
+}
+
+static void end_pirq(unsigned int irq)
+{
+    int evtchn = irq_to_evtchn[irq];
+    if ( !VALID_EVTCHN(evtchn) )
+        return;
+    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
+    {
+        unmask_evtchn(evtchn);
+        pirq_unmask_notify(irq_to_pirq(irq));
+    }
+}
+
+static struct hw_interrupt_type pirq_type = {
+    "Phys-irq",
+    startup_pirq,
+    shutdown_pirq,
+    enable_pirq,
+    disable_pirq,
+    ack_pirq,
+    end_pirq,
+    NULL
+};
+
+static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
+                                       struct pt_regs *regs)
+{
+    /* nothing */
+    return IRQ_HANDLED;
+}
+
+static struct irqaction misdirect_action = {
+    misdirect_interrupt, 
+    SA_INTERRUPT, 
+    0, 
+    "misdirect", 
+    NULL, 
+    NULL
+};
+
+void irq_suspend(void)
+{
+    int virq, irq, evtchn;
+
+    /* Unbind VIRQs from event channels. */
+    for ( virq = 0; virq < NR_VIRQS; virq++ )
+    {
+        if ( (irq = virq_to_irq[virq]) == -1 )
+            continue;
+        evtchn = irq_to_evtchn[irq];
+
+        /* Mark the event channel as unused in our table. */
+        evtchn_to_irq[evtchn] = -1;
+        irq_to_evtchn[irq]    = -1;
+    }
+
+    /*
+     * We should now be unbound from all event channels. Stale bindings to 
+     * PIRQs and/or inter-domain event channels will cause us to barf here.
+     */
+    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
+        if ( evtchn_to_irq[evtchn] != -1 )
+            panic("Suspend attempted while bound to evtchn %d.\n", evtchn);
+}
+
+
+void irq_resume(void)
+{
+    evtchn_op_t op;
+    int         virq, irq, evtchn;
+
+    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
+        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
+
+    for ( virq = 0; virq < NR_VIRQS; virq++ )
+    {
+        if ( (irq = virq_to_irq[virq]) == -1 )
+            continue;
+
+        /* Get a new binding from Xen. */
+        op.cmd              = EVTCHNOP_bind_virq;
+        op.u.bind_virq.virq = virq;
+        if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            panic("Failed to bind virtual IRQ %d\n", virq);
+        evtchn = op.u.bind_virq.port;
+        
+        /* Record the new mapping. */
+        evtchn_to_irq[evtchn] = irq;
+        irq_to_evtchn[irq]    = evtchn;
+
+        /* Ready for use. */
+        unmask_evtchn(evtchn);
+    }
+}
+
+void __init init_IRQ(void)
+{
+    int i;
+
+    spin_lock_init(&irq_mapping_update_lock);
+
+    /* No VIRQ -> IRQ mappings. */
+    for ( i = 0; i < NR_VIRQS; i++ )
+        virq_to_irq[i] = -1;
+
+    /* No event-channel -> IRQ mappings. */
+    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
+    {
+        evtchn_to_irq[i] = -1;
+        mask_evtchn(i); /* No event channels are 'live' right now. */
+    }
+
+    /* No IRQ -> event-channel mappings. */
+    for ( i = 0; i < NR_IRQS; i++ )
+        irq_to_evtchn[i] = -1;
+
+    for ( i = 0; i < NR_DYNIRQS; i++ )
+    {
+        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+        irq_bindcount[dynirq_to_irq(i)] = 0;
+
+        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
+        irq_desc[dynirq_to_irq(i)].action  = 0;
+        irq_desc[dynirq_to_irq(i)].depth   = 1;
+        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
+    }
+
+    for ( i = 0; i < NR_PIRQS; i++ )
+    {
+        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
+        irq_bindcount[pirq_to_irq(i)] = 1;
+
+        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
+        irq_desc[pirq_to_irq(i)].action  = 0;
+        irq_desc[pirq_to_irq(i)].depth   = 1;
+        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
+    }
+
+    (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
+
+    /* This needs to be done early, but after the IRQ subsystem is alive. */
+    ctrl_if_init();
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/head.S b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/head.S
new file mode 100644 (file)
index 0000000..688d6e1
--- /dev/null
@@ -0,0 +1,191 @@
+
+.section __xen_guest
+       .asciz "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=1.3"
+
+.text
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
+#include <asm/asm_offsets.h>
+#include <asm/hypervisor-ifs/arch-x86_32.h>
+
+/*
+ * References to members of the new_cpu_data structure.
+ */
+
+#define X86            new_cpu_data+CPUINFO_x86
+#define X86_VENDOR     new_cpu_data+CPUINFO_x86_vendor
+#define X86_MODEL      new_cpu_data+CPUINFO_x86_model
+#define X86_MASK       new_cpu_data+CPUINFO_x86_mask
+#define X86_HARD_MATH  new_cpu_data+CPUINFO_hard_math
+#define X86_CPUID      new_cpu_data+CPUINFO_cpuid_level
+#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
+#define X86_VENDOR_ID  new_cpu_data+CPUINFO_x86_vendor_id
+
+/* Offsets in start_info structure */
+#define MOD_START      24
+#define MOD_LEN                28
+                
+ENTRY(startup_32)
+       cld
+
+       /* Set up the stack pointer */
+       lss stack_start,%esp
+
+       /* Copy initrd somewhere safe before it's clobbered by BSS. */
+       mov  MOD_LEN(%esi),%ecx
+       shr  $2,%ecx
+       jz   2f                 /* bail from copy loop if no initrd */
+       mov  $_end,%edi
+       add  MOD_LEN(%esi),%edi
+       mov  MOD_START(%esi),%eax
+       add  MOD_LEN(%esi),%eax
+1:     sub  $4,%eax
+       sub  $4,%edi
+       mov  (%eax),%ebx
+       mov  %ebx,(%edi)
+       loop 1b
+       mov  %edi,MOD_START(%esi)
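+       # (Sketch of why the copy runs top-down: the destination region
+       #  _end.._end+len may overlap the top of the source region at
+       #  MOD_START, and copying the highest word first stays correct
+       #  whenever the destination lies above the source.)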
+
+       /* Clear BSS first so that there are no surprises... */
+2:     xorl %eax,%eax
+       movl $__bss_start,%edi
+       movl $__bss_stop,%ecx
+       subl %edi,%ecx
+       rep stosb
+
+       /* Copy the necessary stuff from start_info structure. */
+       mov  $start_info_union,%edi
+       mov  $128,%ecx
+       rep movsl
+
+checkCPUtype:
+
+       /* get vendor info */
+       xorl %eax,%eax                  # call CPUID with 0 -> return vendor ID
+       cpuid
+       movl %eax,X86_CPUID             # save CPUID level
+       movl %ebx,X86_VENDOR_ID         # lo 4 chars
+       movl %edx,X86_VENDOR_ID+4       # next 4 chars
+       movl %ecx,X86_VENDOR_ID+8       # last 4 chars
+
+       movl $1,%eax            # Use the CPUID instruction to get CPU type
+       cpuid
+       movb %al,%cl            # save reg for future use
+       andb $0x0f,%ah          # mask processor family
+       movb %ah,X86
+       andb $0xf0,%al          # mask model
+       shrb $4,%al
+       movb %al,X86_MODEL
+       andb $0x0f,%cl          # mask mask revision
+       movb %cl,X86_MASK
+       movl %edx,X86_CAPABILITY
+
+       xorl %eax,%eax          # Clear FS/GS and LDT
+       movl %eax,%fs
+       movl %eax,%gs
+       cld             # gcc2 wants the direction flag cleared at all times
+
+       call start_kernel
+L6:
+       jmp L6                  # start_kernel should never return here,
+                               # but just in case, we know what happens.
+
+ENTRY(lgdt_finish)
+       movl $(__KERNEL_DS),%eax        # reload all the segment registers
+       movw %ax,%ss                    # after changing gdt.
+
+       movl $(__USER_DS),%eax          # DS/ES contains default USER segment
+       movw %ax,%ds
+       movw %ax,%es
+
+       popl %eax                       # reload CS by intersegment return
+       pushl $(__KERNEL_CS)
+       pushl %eax
+       lret
+
+ENTRY(stack_start)
+       .long init_thread_union+THREAD_SIZE
+       .long __BOOT_DS
+
+# XXXcl
+.globl idt_descr
+.globl cpu_gdt_descr
+
+       ALIGN
+       .word 0                         # 32-bit align idt_desc.address
+idt_descr:
+       .word IDT_ENTRIES*8-1           # idt contains 256 entries
+       .long idt_table
+# XXXcl
+
+# boot GDT descriptor (later on used by CPU#0):
+       .word 0                         # 32 bit align gdt_desc.address
+cpu_gdt_descr:
+       .word GDT_SIZE
+       .long cpu_gdt_table
+
+       .fill NR_CPUS-1,8,0             # space for the other GDT descriptors
+
+.org 0x1000
+ENTRY(empty_zero_page)
+
+.org 0x2000
+ENTRY(swapper_pg_dir)
+
+.org 0x3000
+ENTRY(cpu_gdt_table)
+       .quad 0x0000000000000000        /* NULL descriptor */
+       .quad 0x0000000000000000        /* 0x0b reserved */
+       .quad 0x0000000000000000        /* 0x13 reserved */
+       .quad 0x0000000000000000        /* 0x1b reserved */
+       .quad 0x0000000000000000        /* 0x20 unused */
+       .quad 0x0000000000000000        /* 0x28 unused */
+       .quad 0x0000000000000000        /* 0x33 TLS entry 1 */
+       .quad 0x0000000000000000        /* 0x3b TLS entry 2 */
+       .quad 0x0000000000000000        /* 0x43 TLS entry 3 */
+       .quad 0x0000000000000000        /* 0x4b reserved */
+       .quad 0x0000000000000000        /* 0x53 reserved */
+       .quad 0x0000000000000000        /* 0x5b reserved */
+
+       .quad 0x00cfbb000000c3ff        /* 0x60 ring-1 kernel code, base 0, limit 0xfc400000 */
+       .quad 0x00cfb3000000c3ff        /* 0x68 ring-1 kernel data, base 0, limit 0xfc400000 */
+       .quad 0x00cffb000000c3ff        /* 0x73 ring-3 user code, base 0, limit 0xfc400000 */
+       .quad 0x00cff3000000c3ff        /* 0x7b ring-3 user data, base 0, limit 0xfc400000 */
+
+       .quad 0x0000000000000000        /* 0x80 TSS descriptor */
+       .quad 0x0000000000000000        /* 0x88 LDT descriptor */
+
+       /* Segments used for calling PnP BIOS */
+       .quad 0x0000000000000000        /* 0x90 32-bit code */
+       .quad 0x0000000000000000        /* 0x98 16-bit code */
+       .quad 0x0000000000000000        /* 0xa0 16-bit data */
+       .quad 0x0000000000000000        /* 0xa8 16-bit data */
+       .quad 0x0000000000000000        /* 0xb0 16-bit data */
+       /*
+        * The APM segments have byte granularity and their bases
+        * and limits are set at run time.
+        */
+       .quad 0x0000000000000000        /* 0xb8 APM CS    code */
+       .quad 0x0000000000000000        /* 0xc0 APM CS 16 code (16 bit) */
+       .quad 0x0000000000000000        /* 0xc8 APM DS    data */
+
+       .quad 0x0000000000000000        /* 0xd0 - unused */
+       .quad 0x0000000000000000        /* 0xd8 - unused */
+       .quad 0x0000000000000000        /* 0xe0 - unused */
+       .quad 0x0000000000000000        /* 0xe8 - unused */
+       .quad 0x0000000000000000        /* 0xf0 - unused */
+       .quad 0x0000000000000000        /* 0xf8 - GDT entry 31: double-fault TSS */
+       .fill GDT_ENTRIES-32,8,0
+
+.org 0x4000
+ENTRY(default_ldt)
+
+.org 0x5000
+/*
+ * Real beginning of normal "text" segment
+ */
+ENTRY(stext)
+ENTRY(_stext)
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/irq.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/irq.c
new file mode 100644 (file)
index 0000000..6698c8c
--- /dev/null
@@ -0,0 +1,1192 @@
+/*
+ *     linux/arch/i386/kernel/irq.c
+ *
+ *     Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+/*
+ * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
+ *
+ * IRQs are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/delay.h>
+#include <asm/desc.h>
+#include <asm/irq.h>
+
+/*
+ * Linux has a controller-independent x86 interrupt architecture.
+ * Every controller has a 'controller-template' that is used
+ * by the main code to do the right thing. Each driver-visible
+ * interrupt source is transparently wired to the appropriate
+ * controller, so drivers need not be aware of the
+ * interrupt-controller details.
+ *
+ * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
+ * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
+ * (IO-APICs assumed to be messaging to Pentium local-APICs)
+ *
+ * The code is designed to be easily extended with new/different
+ * interrupt controllers, without having to do assembly magic.
+ */
+
+/*
+ * Controller mappings for all interrupt sources:
+ */
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+       [0 ... NR_IRQS-1] = {
+               .handler = &no_irq_type,
+               .lock = SPIN_LOCK_UNLOCKED
+       }
+};
+
+static void register_irq_proc (unsigned int irq);
+
+/*
+ * per-CPU IRQ handling stacks
+ */
+#ifdef CONFIG_4KSTACKS
+union irq_ctx *hardirq_ctx[NR_CPUS];
+union irq_ctx *softirq_ctx[NR_CPUS];
+#endif
+
+/*
+ * Special irq handlers.
+ */
+
+irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+{ return IRQ_NONE; }
+
+/*
+ * Generic no controller code
+ */
+
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
+{
+/*
+ * What should we do if we get a hw irq event on an illegal vector?
+ * Each architecture has to answer this itself; it doesn't deserve
+ * a generic callback, I think.
+ */
+#ifdef CONFIG_X86
+       printk("unexpected IRQ trap at vector %02x\n", irq);
+#ifdef CONFIG_X86_LOCAL_APIC
+       /*
+        * Currently unexpected vectors happen only on SMP and APIC.
+        * We _must_ ack these because every local APIC has only N
+        * irq slots per priority level, and a 'hanging, unacked' IRQ
+        * holds up an irq slot - in excessive cases (when multiple
+        * unexpected vectors occur) that might lock up the APIC
+        * completely.
+        */
+       ack_APIC_irq();
+#endif
+#endif
+}
+
+/* startup is the same as "enable", shutdown is same as "disable" */
+#define shutdown_none  disable_none
+#define end_none       enable_none
+
+struct hw_interrupt_type no_irq_type = {
+       "none",
+       startup_none,
+       shutdown_none,
+       enable_none,
+       disable_none,
+       ack_none,
+       end_none
+};
+
+atomic_t irq_err_count;
+#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
+atomic_t irq_mis_count;
+#endif
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+       int i = *(loff_t *) v, j;
+       struct irqaction * action;
+       unsigned long flags;
+
+       if (i == 0) {
+               seq_printf(p, "           ");
+               for (j=0; j<NR_CPUS; j++)
+                       if (cpu_online(j))
+                               seq_printf(p, "CPU%d       ",j);
+               seq_putc(p, '\n');
+       }
+
+       if (i < NR_IRQS) {
+               spin_lock_irqsave(&irq_desc[i].lock, flags);
+               action = irq_desc[i].action;
+               if (!action) 
+                       goto skip;
+               seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+               seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+               for (j = 0; j < NR_CPUS; j++)
+                       if (cpu_online(j))
+                               seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+               seq_printf(p, " %14s", irq_desc[i].handler->typename);
+               seq_printf(p, "  %s", action->name);
+
+               for (action=action->next; action; action = action->next)
+                       seq_printf(p, ", %s", action->name);
+
+               seq_putc(p, '\n');
+skip:
+               spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+       } else if (i == NR_IRQS) {
+               seq_printf(p, "NMI: ");
+               for (j = 0; j < NR_CPUS; j++)
+                       if (cpu_online(j))
+                               seq_printf(p, "%10u ", nmi_count(j));
+               seq_putc(p, '\n');
+#ifdef CONFIG_X86_LOCAL_APIC
+               seq_printf(p, "LOC: ");
+               for (j = 0; j < NR_CPUS; j++)
+                       if (cpu_online(j))
+                               seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
+               seq_putc(p, '\n');
+#endif
+               seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
+               seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+       }
+       return 0;
+}
+
+#ifdef CONFIG_SMP
+inline void synchronize_irq(unsigned int irq)
+{
+       while (irq_desc[irq].status & IRQ_INPROGRESS)
+               cpu_relax();
+}
+#endif
+
+/*
+ * This should really return information about whether
+ * we should do bottom half handling etc. Right now we
+ * end up _always_ checking the bottom half, which is a
+ * waste of time and is not what some drivers would
+ * prefer.
+ */
+asmlinkage int handle_IRQ_event(unsigned int irq,
+               struct pt_regs *regs, struct irqaction *action)
+{
+       int status = 1; /* Force the "do bottom halves" bit */
+       int retval = 0;
+
+       if (!(action->flags & SA_INTERRUPT))
+               local_irq_enable();
+
+       do {
+               status |= action->flags;
+               retval |= action->handler(irq, action->dev_id, regs);
+               action = action->next;
+       } while (action);
+       if (status & SA_SAMPLE_RANDOM)
+               add_interrupt_randomness(irq);
+       local_irq_disable();
+       return retval;
+}
+
+static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+       struct irqaction *action;
+
+       if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+               printk(KERN_ERR "irq event %d: bogus return value %x\n",
+                               irq, action_ret);
+       } else {
+               printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+       }
+       dump_stack();
+       printk(KERN_ERR "handlers:\n");
+       action = desc->action;
+       do {
+               printk(KERN_ERR "[<%p>]", action->handler);
+               print_symbol(" (%s)",
+                       (unsigned long)action->handler);
+               printk("\n");
+               action = action->next;
+       } while (action);
+}
+
+static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+       static int count = 100;
+
+       if (count) {
+               count--;
+               __report_bad_irq(irq, desc, action_ret);
+       }
+}
+
+static int noirqdebug;
+
+static int __init noirqdebug_setup(char *str)
+{
+       noirqdebug = 1;
+       printk("IRQ lockup detection disabled\n");
+       return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+
+/*
+ * If 99,900 of the previous 100,000 interrupts have not been handled then
+ * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
+ * turn the IRQ off.
+ *
+ * (The other 100-of-100,000 interrupts may have been a correctly-functioning
+ *  device sharing an IRQ with the failing one)
+ *
+ * Called under desc->lock
+ */
+static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+       if (action_ret != IRQ_HANDLED) {
+               desc->irqs_unhandled++;
+               if (action_ret != IRQ_NONE)
+                       report_bad_irq(irq, desc, action_ret);
+       }
+
+       desc->irq_count++;
+       if (desc->irq_count < 100000)
+               return;
+
+       desc->irq_count = 0;
+       if (desc->irqs_unhandled > 99900) {
+               /*
+                * The interrupt is stuck
+                */
+               __report_bad_irq(irq, desc, action_ret);
+               /*
+                * Now kill the IRQ
+                */
+               printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+               desc->status |= IRQ_DISABLED;
+               desc->handler->disable(irq);
+       }
+       desc->irqs_unhandled = 0;
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock. 
+ */
+/**
+ *     disable_irq_nosync - disable an irq without waiting
+ *     @irq: Interrupt to disable
+ *
+ *     Disable the selected interrupt line.  Disables and Enables are
+ *     nested.
+ *     Unlike disable_irq(), this function does not ensure existing
+ *     instances of the IRQ handler have completed before returning.
+ *
+ *     This function may be called from IRQ context.
+ */
+inline void disable_irq_nosync(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       if (!desc->depth++) {
+               desc->status |= IRQ_DISABLED;
+               desc->handler->disable(irq);
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ *     disable_irq - disable an irq and wait for completion
+ *     @irq: Interrupt to disable
+ *
+ *     Disable the selected interrupt line.  Enables and Disables are
+ *     nested.
+ *     This function waits for any pending IRQ handlers for this interrupt
+ *     to complete before returning. If you use this function while
+ *     holding a resource the IRQ handler may need, you will deadlock.
+ *
+ *     This function may be called - with care - from IRQ context.
+ */
+void disable_irq(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       disable_irq_nosync(irq);
+       if (desc->action)
+               synchronize_irq(irq);
+}
+
+/**
+ *     enable_irq - enable handling of an irq
+ *     @irq: Interrupt to enable
+ *
+ *     Undoes the effect of one call to disable_irq().  If this
+ *     matches the last disable, processing of interrupts on this
+ *     IRQ line is re-enabled.
+ *
+ *     This function may be called from IRQ context.
+ */
+void enable_irq(unsigned int irq)
+{
+       irq_desc_t *desc = irq_desc + irq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&desc->lock, flags);
+       switch (desc->depth) {
+       case 1: {
+               unsigned int status = desc->status & ~IRQ_DISABLED;
+               desc->status = status;
+               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+                       desc->status = status | IRQ_REPLAY;
+                       hw_resend_irq(desc->handler,irq);
+               }
+               desc->handler->enable(irq);
+               /* fall-through */
+       }
+       default:
+               desc->depth--;
+               break;
+       case 0:
+               printk("enable_irq(%u) unbalanced from %p\n", irq,
+                      __builtin_return_address(0));
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+}
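+
+/*
+ * Nesting sketch (illustration only): two disable_irq() calls take
+ * desc->depth from 0 to 1 to 2; only the matching enable_irq() call
+ * that brings depth from 1 back to 0 actually re-enables the line.
+ */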
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
+{      
+       /* 
+        * We ack quickly, we don't want the irq controller
+        * thinking we're snobs just because some other CPU has
+        * disabled global interrupts (we have already done the
+        * INT_ACK cycles, it's too late to try to pretend to the
+        * controller that we aren't taking the interrupt).
+        *
+        * 0 return value means that this irq is already being
+        * handled by some other CPU. (or is disabled)
+        */
+       irq_desc_t *desc = irq_desc + irq;
+       struct irqaction * action;
+       unsigned int status;
+
+       irq_enter();
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+       /* Debugging check for stack overflow: is there less than 1KB free? */
+       {
+               long esp;
+
+               __asm__ __volatile__("andl %%esp,%0" :
+                                       "=r" (esp) : "0" (THREAD_SIZE - 1));
+               if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
+                       printk("do_IRQ: stack overflow: %ld\n",
+                               esp - sizeof(struct thread_info));
+                       dump_stack();
+               }
+       }
+#endif
+       kstat_this_cpu.irqs[irq]++;
+       spin_lock(&desc->lock);
+       desc->handler->ack(irq);
+       /*
+        * REPLAY is when Linux resends an IRQ that was dropped earlier.
+        * WAITING is used by probe to mark irqs that are being tested.
+        */
+       status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+       status |= IRQ_PENDING; /* we _want_ to handle it */
+
+       /*
+        * If the IRQ is disabled for whatever reason, we cannot
+        * use the action we have.
+        */
+       action = NULL;
+       if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
+               action = desc->action;
+               status &= ~IRQ_PENDING; /* we commit to handling */
+               status |= IRQ_INPROGRESS; /* we are handling it */
+       }
+       desc->status = status;
+
+       /*
+        * If there is no IRQ handler or it was disabled, exit early.
+        * Since we set PENDING, if another processor is handling
+        * a different instance of this same irq, the other processor
+        * will take care of it.
+        */
+       if (unlikely(!action))
+               goto out;
+
+       /*
+        * Edge triggered interrupts need to remember
+        * pending events.
+        * This applies to any hw interrupts that allow a second
+        * instance of the same irq to arrive while we are in do_IRQ
+        * or in the handler. But the code here only handles the _second_
+        * instance of the irq, not the third or fourth. So it is mostly
+        * useful for irq hardware that does not mask cleanly in an
+        * SMP environment.
+        */
+#ifdef CONFIG_4KSTACKS
+
+       for (;;) {
+               irqreturn_t action_ret;
+               u32 *isp;
+               union irq_ctx * curctx;
+               union irq_ctx * irqctx;
+
+               curctx = (union irq_ctx *) current_thread_info();
+               irqctx = hardirq_ctx[smp_processor_id()];
+
+               spin_unlock(&desc->lock);
+
+               /*
+                * This is where we switch to the IRQ stack. However, if we
+                * are already using the IRQ stack (because we interrupted a
+                * hardirq handler) we can't do that, and just keep using the
+                * current stack, which is the IRQ stack already, after all.
+                */
+
+               if (curctx == irqctx)
+                       action_ret = handle_IRQ_event(irq, regs, action);
+               else {
+                       /* build the stack frame on the IRQ stack */
+                       isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+                       irqctx->tinfo.task = curctx->tinfo.task;
+                       irqctx->tinfo.previous_esp = current_stack_pointer();
+
+                       *--isp = (u32) action;
+                       *--isp = (u32) regs;
+                       *--isp = (u32) irq;
+
+                       asm volatile(
+                               "       xchgl   %%ebx,%%esp     \n"
+                               "       call    handle_IRQ_event \n"
+                               "       xchgl   %%ebx,%%esp     \n"
+                               : "=a"(action_ret)
+                               : "b"(isp)
+                               : "memory", "cc", "edx", "ecx"
+                       );
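+                       /*
+                        * The xchgl/call/xchgl above runs handle_IRQ_event()
+                        * on the per-CPU hardirq stack: %esp and %ebx are
+                        * swapped so the call picks irq/regs/action up from
+                        * isp, then the original stack is restored.
+                        */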
+
+
+               }
+               spin_lock(&desc->lock);
+               if (!noirqdebug)
+                       note_interrupt(irq, desc, action_ret);
+               if (curctx != irqctx)
+                       irqctx->tinfo.task = NULL;
+               if (likely(!(desc->status & IRQ_PENDING)))
+                       break;
+               desc->status &= ~IRQ_PENDING;
+       }
+
+#else
+
+       for (;;) {
+               irqreturn_t action_ret;
+
+               spin_unlock(&desc->lock);
+
+               action_ret = handle_IRQ_event(irq, regs, action);
+
+               spin_lock(&desc->lock);
+               if (!noirqdebug)
+                       note_interrupt(irq, desc, action_ret);
+               if (likely(!(desc->status & IRQ_PENDING)))
+                       break;
+               desc->status &= ~IRQ_PENDING;
+       }
+#endif
+       desc->status &= ~IRQ_INPROGRESS;
+
+out:
+       /*
+        * The ->end() handler has to deal with interrupts which got
+        * disabled while the handler was running.
+        */
+       desc->handler->end(irq);
+       spin_unlock(&desc->lock);
+
+       irq_exit();
+
+       return 1;
+}
+
+int can_request_irq(unsigned int irq, unsigned long irqflags)
+{
+       struct irqaction *action;
+
+       if (irq >= NR_IRQS)
+               return 0;
+       action = irq_desc[irq].action;
+       if (action) {
+               if (irqflags & action->flags & SA_SHIRQ)
+                       action = NULL;
+       }
+       return !action;
+}
+
+/**
+ *     request_irq - allocate an interrupt line
+ *     @irq: Interrupt line to allocate
+ *     @handler: Function to be called when the IRQ occurs
+ *     @irqflags: Interrupt type flags
+ *     @devname: An ascii name for the claiming device
+ *     @dev_id: A cookie passed back to the handler function
+ *
+ *     This call allocates interrupt resources and enables the
+ *     interrupt line and IRQ handling. From the point this
+ *     call is made your handler function may be invoked. Since
+ *     your handler function must clear any interrupt the board 
+ *     raises, you must take care both to initialise your hardware
+ *     and to set up the interrupt handler in the right order.
+ *
+ *     Dev_id must be globally unique. Normally the address of the
+ *     device data structure is used as the cookie. Since the handler
+ *     receives this value it makes sense to use it.
+ *
+ *     If your interrupt is shared you must pass a non NULL dev_id
+ *     as this is required when freeing the interrupt.
+ *
+ *     Flags:
+ *
+ *     SA_SHIRQ                Interrupt is shared
+ *
+ *     SA_INTERRUPT            Disable local interrupts while processing
+ *
+ *     SA_SAMPLE_RANDOM        The interrupt can be used for entropy
+ *
+ */
+int request_irq(unsigned int irq, 
+               irqreturn_t (*handler)(int, void *, struct pt_regs *),
+               unsigned long irqflags, 
+               const char * devname,
+               void *dev_id)
+{
+       int retval;
+       struct irqaction * action;
+
+#if 1
+       /*
+        * Sanity-check: shared interrupts should REALLY pass in
+        * a real dev-ID, otherwise we'll have trouble later trying
+        * to figure out which interrupt is which (messes up the
+        * interrupt freeing logic etc).
+        */
+       if (irqflags & SA_SHIRQ) {
+               if (!dev_id)
+                       printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
+       }
+#endif
+
+       if (irq >= NR_IRQS)
+               return -EINVAL;
+       if (!handler)
+               return -EINVAL;
+
+       action = (struct irqaction *)
+                       kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+       if (!action)
+               return -ENOMEM;
+
+       action->handler = handler;
+       action->flags = irqflags;
+       action->mask = 0;
+       action->name = devname;
+       action->next = NULL;
+       action->dev_id = dev_id;
+
+       retval = setup_irq(irq, action);
+       if (retval)
+               kfree(action);
+       return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
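+
+/*
+ * Usage sketch (hypothetical driver; my_handler and my_dev are
+ * illustrative names, not part of this file):
+ *
+ *     if (request_irq(irq, my_handler, SA_SHIRQ | SA_SAMPLE_RANDOM,
+ *                     "mydev", my_dev))
+ *             printk(KERN_ERR "mydev: IRQ %d is busy\n", irq);
+ */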
+
+/**
+ *     free_irq - free an interrupt
+ *     @irq: Interrupt line to free
+ *     @dev_id: Device identity to free
+ *
+ *     Remove an interrupt handler. The handler is removed and if the
+ *     interrupt line is no longer in use by any driver it is disabled.
+ *     On a shared IRQ the caller must ensure the interrupt is disabled
+ *     on the card it drives before calling this function. The function
+ *     does not return until any executing interrupts for this IRQ
+ *     have completed.
+ *
+ *     This function must not be called from interrupt context. 
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+       irq_desc_t *desc;
+       struct irqaction **p;
+       unsigned long flags;
+
+       if (irq >= NR_IRQS)
+               return;
+
+       desc = irq_desc + irq;
+       spin_lock_irqsave(&desc->lock,flags);
+       p = &desc->action;
+       for (;;) {
+               struct irqaction * action = *p;
+               if (action) {
+                       struct irqaction **pp = p;
+                       p = &action->next;
+                       if (action->dev_id != dev_id)
+                               continue;
+
+                       /* Found it - now remove it from the list of entries */
+                       *pp = action->next;
+                       if (!desc->action) {
+                               desc->status |= IRQ_DISABLED;
+                               desc->handler->shutdown(irq);
+                       }
+                       spin_unlock_irqrestore(&desc->lock,flags);
+
+                       /* Wait to make sure it's not being used on another CPU */
+                       synchronize_irq(irq);
+                       kfree(action);
+                       return;
+               }
+               printk("Trying to free already-free IRQ %d\n", irq);
+               spin_unlock_irqrestore(&desc->lock,flags);
+               return;
+       }
+}
+
+EXPORT_SYMBOL(free_irq);
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+
+static DECLARE_MUTEX(probe_sem);
+
+/**
+ *     probe_irq_on    - begin an interrupt autodetect
+ *
+ *     Commence probing for an interrupt. The interrupts are scanned
+ *     and a mask of potential interrupt lines is returned.
+ *
+ */
+unsigned long probe_irq_on(void)
+{
+       unsigned int i;
+       irq_desc_t *desc;
+       unsigned long val;
+       unsigned long delay;
+
+       down(&probe_sem);
+       /* 
+        * something may have generated an irq long ago and we want to
+        * flush such a longstanding irq before considering it as spurious. 
+        */
+       for (i = NR_PIRQS-1; i > 0; i--)  {
+               desc = irq_desc + i;
+
+               spin_lock_irq(&desc->lock);
+               if (!desc->action)
+                       desc->handler->startup(i);
+               spin_unlock_irq(&desc->lock);
+       }
+
+       /* Wait for longstanding interrupts to trigger. */
+       for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+               /* about 20ms delay */ barrier();
+
+       /*
+        * enable any unassigned irqs
+        * (we must startup again here because if a longstanding irq
+        * happened in the previous stage, it may have masked itself)
+        */
+       for (i = NR_PIRQS-1; i > 0; i--) {
+               desc = irq_desc + i;
+
+               spin_lock_irq(&desc->lock);
+               if (!desc->action) {
+                       desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
+                       if (desc->handler->startup(i))
+                               desc->status |= IRQ_PENDING;
+               }
+               spin_unlock_irq(&desc->lock);
+       }
+
+       /*
+        * Wait for spurious interrupts to trigger
+        */
+       for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+               /* about 100ms delay */ barrier();
+
+       /*
+        * Now filter out any obviously spurious interrupts
+        */
+       val = 0;
+       for (i = 0; i < NR_PIRQS; i++) {
+               irq_desc_t *desc = irq_desc + i;
+               unsigned int status;
+
+               spin_lock_irq(&desc->lock);
+               status = desc->status;
+
+               if (status & IRQ_AUTODETECT) {
+                       /* It triggered already - consider it spurious. */
+                       if (!(status & IRQ_WAITING)) {
+                               desc->status = status & ~IRQ_AUTODETECT;
+                               desc->handler->shutdown(i);
+                       } else
+                               if (i < 32)
+                                       val |= 1 << i;
+               }
+               spin_unlock_irq(&desc->lock);
+       }
+
+       return val;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+/*
+ * Return a mask of triggered interrupts (this
+ * can handle only legacy ISA interrupts).
+ */
+/**
+ *     probe_irq_mask - scan a bitmap of interrupt lines
+ *     @val:   mask of interrupts to consider
+ *
+ *     Scan the ISA bus interrupt lines and return a bitmap of
+ *     active interrupts. The interrupt probe logic state is then
+ *     returned to its previous value.
+ *
+ *     Note: we need to scan all the irqs even though we will
+ *     only return ISA irq numbers - just so that we reset them
+ *     all to a known state.
+ */
+unsigned int probe_irq_mask(unsigned long val)
+{
+       int i;
+       unsigned int mask;
+
+       mask = 0;
+       for (i = 0; i < NR_PIRQS; i++) {
+               irq_desc_t *desc = irq_desc + i;
+               unsigned int status;
+
+               spin_lock_irq(&desc->lock);
+               status = desc->status;
+
+               if (status & IRQ_AUTODETECT) {
+                       if (i < 16 && !(status & IRQ_WAITING))
+                               mask |= 1 << i;
+
+                       desc->status = status & ~IRQ_AUTODETECT;
+                       desc->handler->shutdown(i);
+               }
+               spin_unlock_irq(&desc->lock);
+       }
+       up(&probe_sem);
+
+       return mask & val;
+}
+
+/*
+ * Return the one interrupt that triggered (this can
+ * handle any interrupt source).
+ */
+
+/**
+ *     probe_irq_off   - end an interrupt autodetect
+ *     @val: mask of potential interrupts (unused)
+ *
+ *     Scans the unused interrupt lines and returns the line which
+ *     appears to have triggered the interrupt. If no interrupt was
+ *     found then zero is returned. If more than one interrupt is
+ *     found then the negative of the first candidate is returned to
+ *     indicate that there is doubt.
+ *
+ *     The interrupt probe logic state is returned to its previous
+ *     value.
+ *
+ *     BUGS: When used in a module (which arguably shouldn't happen)
+ *     nothing prevents two IRQ probe callers from overlapping. The
+ *     results of this are non-optimal.
+ */
+int probe_irq_off(unsigned long val)
+{
+       int i, irq_found, nr_irqs;
+
+       nr_irqs = 0;
+       irq_found = 0;
+       for (i = 0; i < NR_PIRQS; i++) {
+               irq_desc_t *desc = irq_desc + i;
+               unsigned int status;
+
+               spin_lock_irq(&desc->lock);
+               status = desc->status;
+
+               if (status & IRQ_AUTODETECT) {
+                       if (!(status & IRQ_WAITING)) {
+                               if (!nr_irqs)
+                                       irq_found = i;
+                               nr_irqs++;
+                       }
+                       desc->status = status & ~IRQ_AUTODETECT;
+                       desc->handler->shutdown(i);
+               }
+               spin_unlock_irq(&desc->lock);
+       }
+       up(&probe_sem);
+
+       if (nr_irqs > 1)
+               irq_found = -irq_found;
+       return irq_found;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
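+
+/*
+ * Typical usage (an illustrative sketch, not part of this file): a
+ * legacy driver autodetects its line by arming the probe logic, making
+ * the device raise an interrupt, and then reading the result back:
+ *
+ *     unsigned long mask = probe_irq_on();
+ *     // ... poke the device so that it asserts its interrupt ...
+ *     irq = probe_irq_off(mask);  // >0: the line; 0: none; <0: several
+ */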
+
+/* this was setup_x86_irq but it seems pretty generic */
+int setup_irq(unsigned int irq, struct irqaction * new)
+{
+       int shared = 0;
+       unsigned long flags;
+       struct irqaction *old, **p;
+       irq_desc_t *desc = irq_desc + irq;
+
+       if (desc->handler == &no_irq_type)
+               return -ENOSYS;
+       /*
+        * Some drivers like serial.c use request_irq() heavily,
+        * so we have to be careful not to interfere with a
+        * running system.
+        */
+       if (new->flags & SA_SAMPLE_RANDOM) {
+               /*
+                * This function might sleep, so we want to call it first,
+                * outside of the atomic block.
+                * Yes, this might clear the entropy pool if the wrong
+                * driver is loaded without actually installing a new
+                * handler, but that is hardly a problem: only the
+                * sysadmin is able to do this.
+                */
+               rand_initialize_irq(irq);
+       }
+
+       /*
+        * The following block of code has to be executed atomically
+        */
+       spin_lock_irqsave(&desc->lock,flags);
+       p = &desc->action;
+       if ((old = *p) != NULL) {
+               /* Can't share interrupts unless both agree to */
+               if (!(old->flags & new->flags & SA_SHIRQ)) {
+                       spin_unlock_irqrestore(&desc->lock,flags);
+                       return -EBUSY;
+               }
+
+               /* add new interrupt at end of irq queue */
+               do {
+                       p = &old->next;
+                       old = *p;
+               } while (old);
+               shared = 1;
+       }
+
+       *p = new;
+
+       if (!shared) {
+               desc->depth = 0;
+               desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
+               desc->handler->startup(irq);
+       }
+       spin_unlock_irqrestore(&desc->lock,flags);
+
+       register_irq_proc(irq);
+       return 0;
+}
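+
+/*
+ * Caller-side sketch (illustrative; my_handler, "mydev" and dev are
+ * hypothetical): two drivers may share a line only if every action on
+ * it sets SA_SHIRQ, e.g.
+ *
+ *     if (request_irq(irq, my_handler, SA_SHIRQ, "mydev", dev))
+ *             return -EBUSY;  // line in use without SA_SHIRQ
+ */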
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NR_IRQS];
+
+#ifdef CONFIG_SMP
+
+static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
+
+cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+
+static int irq_affinity_read_proc(char *page, char **start, off_t off,
+                       int count, int *eof, void *data)
+{
+       int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
+       if (count - len < 2)
+               return -EINVAL;
+       len += sprintf(page + len, "\n");
+       return len;
+}
+
+static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
+                                       unsigned long count, void *data)
+{
+       int irq = (long)data, full_count = count, err;
+       cpumask_t new_value, tmp;
+
+       if (!irq_desc[irq].handler->set_affinity)
+               return -EIO;
+
+       err = cpumask_parse(buffer, count, new_value);
+       if (err)
+               return err;
+
+       /*
+        * Do not allow disabling IRQs completely - it's too easy a
+        * way to make the system unusable accidentally :-) At least
+        * one online CPU still has to be targeted.
+        */
+       cpus_and(tmp, new_value, cpu_online_map);
+       if (cpus_empty(tmp))
+               return -EINVAL;
+
+       irq_affinity[irq] = new_value;
+       irq_desc[irq].handler->set_affinity(irq,
+                                       cpumask_of_cpu(first_cpu(new_value)));
+
+       return full_count;
+}
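+
+/*
+ * From user space this is reached via /proc/irq/<n>/smp_affinity; for
+ * example "echo 3 > /proc/irq/7/smp_affinity" restricts IRQ 7 to CPUs
+ * 0 and 1.  Note that only the first CPU of the parsed mask is passed
+ * down to ->set_affinity() above.
+ */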
+
+#endif
+
+static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
+                       int count, int *eof, void *data)
+{
+       int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
+       if (count - len < 2)
+               return -EINVAL;
+       len += sprintf(page + len, "\n");
+       return len;
+}
+
+static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
+                                       unsigned long count, void *data)
+{
+       cpumask_t *mask = (cpumask_t *)data;
+       unsigned long full_count = count, err;
+       cpumask_t new_value;
+
+       err = cpumask_parse(buffer, count, new_value);
+       if (err)
+               return err;
+
+       *mask = new_value;
+       return full_count;
+}
+
+#define MAX_NAMELEN 10
+
+static void register_irq_proc (unsigned int irq)
+{
+       char name [MAX_NAMELEN];
+
+       if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
+                       irq_dir[irq])
+               return;
+
+       memset(name, 0, MAX_NAMELEN);
+       sprintf(name, "%d", irq);
+
+       /* create /proc/irq/1234 */
+       irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+#ifdef CONFIG_SMP
+       {
+               struct proc_dir_entry *entry;
+
+               /* create /proc/irq/1234/smp_affinity */
+               entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+               if (entry) {
+                       entry->nlink = 1;
+                       entry->data = (void *)(long)irq;
+                       entry->read_proc = irq_affinity_read_proc;
+                       entry->write_proc = irq_affinity_write_proc;
+               }
+
+               smp_affinity_entry[irq] = entry;
+       }
+#endif
+}
+
+unsigned long prof_cpu_mask = -1;
+
+void init_irq_proc (void)
+{
+       struct proc_dir_entry *entry;
+       int i;
+
+       /* create /proc/irq */
+       root_irq_dir = proc_mkdir("irq", 0);
+
+       /* create /proc/irq/prof_cpu_mask */
+       entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
+
+       if (!entry)
+           return;
+
+       entry->nlink = 1;
+       entry->data = (void *)&prof_cpu_mask;
+       entry->read_proc = prof_cpu_mask_read_proc;
+       entry->write_proc = prof_cpu_mask_write_proc;
+
+       /*
+        * Create entries for all existing IRQs.
+        */
+       for (i = 0; i < NR_IRQS; i++)
+               register_irq_proc(i);
+}
+
+
+#ifdef CONFIG_4KSTACKS
+static char softirq_stack[NR_CPUS * THREAD_SIZE]  __attribute__((__aligned__(THREAD_SIZE), __section__(".bss.page_aligned")));
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]  __attribute__((__aligned__(THREAD_SIZE), __section__(".bss.page_aligned")));
+
+/*
+ * allocate per-cpu stacks for hardirq and for softirq processing
+ */
+void irq_ctx_init(int cpu)
+{
+       union irq_ctx *irqctx;
+
+       if (hardirq_ctx[cpu])
+               return;
+
+       irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+
+       hardirq_ctx[cpu] = irqctx;
+
+       irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = SOFTIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+
+       softirq_ctx[cpu] = irqctx;
+
+       printk("CPU %u irqstacks, hard=%p soft=%p\n",
+               cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
+}
+
+extern asmlinkage void __do_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+       unsigned long flags;
+       struct thread_info *curctx;
+       union irq_ctx *irqctx;
+       u32 *isp;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       if (local_softirq_pending()) {
+               curctx = current_thread_info();
+               irqctx = softirq_ctx[smp_processor_id()];
+               irqctx->tinfo.task = curctx->task;
+               irqctx->tinfo.previous_esp = current_stack_pointer();
+
+               /* build the stack frame on the softirq stack */
+               isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+
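+               /*
+                * Switch %esp to the softirq stack, run __do_softirq()
+                * there, then restore the task stack.  %ebx carries the
+                * softirq stack pointer in and the saved %esp back out.
+                */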
+               asm volatile(
+                       "       xchgl   %%ebx,%%esp     \n"
+                       "       call    __do_softirq    \n"
+                       "       movl    %%ebx,%%esp     \n"
+                       : "=b"(isp)
+                       : "0"(isp)
+                       : "memory", "cc", "edx", "ecx", "eax"
+               );
+       }
+
+       local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(do_softirq);
+#endif
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/ldt.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/ldt.c
new file mode 100644 (file)
index 0000000..8b4b77e
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * linux/kernel/ldt.c
+ *
+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/desc.h>
+
+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
+static void flush_ldt(void *null)
+{
+       if (current->active_mm)
+               load_LDT(&current->active_mm->context);
+}
+#endif
+
+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+{
+       void *oldldt;
+       void *newldt;
+       int oldsize;
+
+       if (mincount <= pc->size)
+               return 0;
+       oldsize = pc->size;
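+       /*
+        * Round up to a multiple of 512 entries: at 8 bytes per LDT
+        * entry that keeps the allocation a whole number of 4K pages.
+        */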
+       mincount = (mincount+511)&(~511);
+       if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
+               newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
+       else
+               newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
+
+       if (!newldt)
+               return -ENOMEM;
+
+       if (oldsize)
+               memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
+       oldldt = pc->ldt;
+       memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+       pc->ldt = newldt;
+       wmb();
+       pc->size = mincount;
+       wmb();
+
+       if (reload) {
+#ifdef CONFIG_SMP
+               cpumask_t mask;
+               preempt_disable();
+#endif
+               make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
+                                   PAGE_SIZE);
+               load_LDT(pc);
+               flush_page_update_queue();
+#ifdef CONFIG_SMP
+               mask = cpumask_of_cpu(smp_processor_id());
+               if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+                       smp_call_function(flush_ldt, 0, 1, 1);
+               preempt_enable();
+#endif
+       }
+       if (oldsize) {
+               if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
+                       vfree(oldldt);
+               else
+                       kfree(oldldt);
+       }
+       return 0;
+}
+
+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+{
+       int err = alloc_ldt(new, old->size, 0);
+       if (err < 0)
+               return err;
+       memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+       make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
+                           PAGE_SIZE);
+       return 0;
+}
+
+/*
+ * we do not have to muck with descriptors here, that is
+ * done in switch_mm() as needed.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+       struct mm_struct * old_mm;
+       int retval = 0;
+
+       init_MUTEX(&mm->context.sem);
+       mm->context.size = 0;
+       old_mm = current->mm;
+       if (old_mm && old_mm->context.size > 0) {
+               down(&old_mm->context.sem);
+               retval = copy_ldt(&mm->context, &old_mm->context);
+               up(&old_mm->context.sem);
+       }
+       return retval;
+}
+
+/*
+ * No need to lock the MM as we are the last user
+ */
+void destroy_context(struct mm_struct *mm)
+{
+       if (mm->context.size) {
+               if (mm == current->active_mm)
+                       clear_LDT();
+               make_pages_writeable(mm->context.ldt, 
+                                    (mm->context.size * LDT_ENTRY_SIZE) /
+                                    PAGE_SIZE);
+               flush_page_update_queue();
+               if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+                       vfree(mm->context.ldt);
+               else
+                       kfree(mm->context.ldt);
+               mm->context.size = 0;
+       }
+}
+
+static int read_ldt(void __user * ptr, unsigned long bytecount)
+{
+       int err;
+       unsigned long size;
+       struct mm_struct * mm = current->mm;
+
+       if (!mm->context.size)
+               return 0;
+       if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+               bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+
+       down(&mm->context.sem);
+       size = mm->context.size*LDT_ENTRY_SIZE;
+       if (size > bytecount)
+               size = bytecount;
+
+       err = 0;
+       if (copy_to_user(ptr, mm->context.ldt, size))
+               err = -EFAULT;
+       up(&mm->context.sem);
+       if (err < 0)
+               return err;
+       if (size != bytecount) {
+               /* zero-fill the rest */
+               clear_user(ptr+size, bytecount-size);
+       }
+       return bytecount;
+}
+
+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
+{
+       int err;
+       unsigned long size;
+       void *address;
+
+       err = 0;
+       address = &default_ldt[0];
+       size = 5*sizeof(struct desc_struct);
+       if (size > bytecount)
+               size = bytecount;
+
+       err = size;
+       if (copy_to_user(ptr, address, size))
+               err = -EFAULT;
+
+       return err;
+}
+
+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
+{
+       struct mm_struct * mm = current->mm;
+       __u32 entry_1, entry_2, *lp;
+       unsigned long phys_lp, max_limit;
+       int error;
+       struct user_desc ldt_info;
+
+       error = -EINVAL;
+       if (bytecount != sizeof(ldt_info))
+               goto out;
+       error = -EFAULT;        
+       if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+               goto out;
+
+       error = -EINVAL;
+       if (ldt_info.entry_number >= LDT_ENTRIES)
+               goto out;
+       if (ldt_info.contents == 3) {
+               if (oldmode)
+                       goto out;
+               if (ldt_info.seg_not_present == 0)
+                       goto out;
+       }
+
+       /*
+        * This makes our tests for overlap with Xen space
+        * easier. There's no good reason to have a user segment
+        * starting this high anyway.
+        */
+       if (ldt_info.base_addr >= PAGE_OFFSET)
+               goto out;
+
+       down(&mm->context.sem);
+       if (ldt_info.entry_number >= mm->context.size) {
+               error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+               if (error < 0)
+                       goto out_unlock;
+       }
+
+       lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+       phys_lp = arbitrary_virt_to_phys(lp);
+
+       /* Allow LDTs to be cleared by the user. */
+       if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+               if (oldmode || LDT_empty(&ldt_info)) {
+                       entry_1 = 0;
+                       entry_2 = 0;
+                       goto install;
+               }
+       }
+
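+       /*
+        * Clamp the limit so that base + limit cannot reach the Xen
+        * hypervisor's reserved virtual address range.
+        */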
+       max_limit = HYPERVISOR_VIRT_START - ldt_info.base_addr;
+       if (ldt_info.limit_in_pages)
+               max_limit >>= PAGE_SHIFT;
+       max_limit--;
+       if ((ldt_info.limit & 0xfffff) > (max_limit & 0xfffff))
+               ldt_info.limit = max_limit;
+
+       entry_1 = LDT_entry_a(&ldt_info);
+       entry_2 = LDT_entry_b(&ldt_info);
+       if (oldmode)
+               entry_2 &= ~(1 << 20);
+
+       /* Install the new entry ...  */
+install:
+       error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
+
+out_unlock:
+       up(&mm->context.sem);
+out:
+       return error;
+}
+
+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+       int ret = -ENOSYS;
+
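+       /*
+        * func: 0 = read LDT, 1 = write entry (old interface),
+        * 2 = read default LDT, 0x11 = write entry (new interface,
+        * struct user_desc semantics).
+        */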
+       switch (func) {
+       case 0:
+               ret = read_ldt(ptr, bytecount);
+               break;
+       case 1:
+               ret = write_ldt(ptr, bytecount, 1);
+               break;
+       case 2:
+               ret = read_default_ldt(ptr, bytecount);
+               break;
+       case 0x11:
+               ret = write_ldt(ptr, bytecount, 0);
+               break;
+       }
+       return ret;
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/process.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/process.c
new file mode 100644 (file)
index 0000000..1d7f637
--- /dev/null
@@ -0,0 +1,782 @@
+/*
+ *  linux/arch/i386/kernel/process.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elfcore.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/ptrace.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/ldt.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/irq.h>
+#include <asm/desc.h>
+#ifdef CONFIG_MATH_EMULATION
+#include <asm/math_emu.h>
+#endif
+
+#include <linux/irq.h>
+#include <linux/err.h>
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+int hlt_counter;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+       return ((unsigned long *)tsk->thread.esp)[3];
+}
+
+/*
+ * Power management idle function, if any.
+ */
+void (*pm_idle)(void);
+
+void disable_hlt(void)
+{
+       hlt_counter++;
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+       hlt_counter--;
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
+/*
+ * We use this if we don't have any better
+ * idle routine..
+ */
+void default_idle(void)
+{
+       if (!hlt_counter && current_cpu_data.hlt_works_ok) {
+               local_irq_disable();
+               if (!need_resched())
+                       safe_halt();
+               else
+                       local_irq_enable();
+       }
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle (void)
+{
+       int oldval;
+
+       local_irq_enable();
+
+       /*
+        * Deal with another CPU just having chosen a thread to
+        * run here:
+        */
+       oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+       if (!oldval) {
+               set_thread_flag(TIF_POLLING_NRFLAG);
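+               /*
+                * Spin with "rep; nop" (PAUSE) until TIF_NEED_RESCHED
+                * appears in this thread's flags.
+                */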
+               asm volatile(
+                       "2:"
+                       "testl %0, %1;"
+                       "rep; nop;"
+                       "je 2b;"
+                       : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+       } else {
+               set_need_resched();
+       }
+}
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle (void)
+{
+       /* endless idle loop with no priority at all */
+       while (1) {
+               while (!need_resched()) {
+                       void (*idle)(void) = pm_idle;
+
+                       if (!idle)
+                               idle = default_idle;
+
+                       irq_stat[smp_processor_id()].idle_timestamp = jiffies;
+                       idle();
+               }
+               schedule();
+       }
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which avoids the IPI otherwise needed to trigger a need_resched check.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ */
+static void mwait_idle(void)
+{
+       local_irq_enable();
+
+       if (!need_resched()) {
+               set_thread_flag(TIF_POLLING_NRFLAG);
+               do {
+                       __monitor((void *)&current_thread_info()->flags, 0, 0);
+                       if (need_resched())
+                               break;
+                       __mwait(0, 0);
+               } while (!need_resched());
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+       }
+}
+
+void __init select_idle_routine(const struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_MWAIT)) {
+               printk("monitor/mwait feature present.\n");
+               /*
+                * Skip this if setup has overridden the idle routine.
+                * Also take care of systems with asymmetric CPUs:
+                * use mwait_idle only if all CPUs support it.
+                * Otherwise we fall back to xen_cpu_idle() below.
+                */
+               if (!pm_idle) {
+                       printk("using mwait in idle threads.\n");
+                       pm_idle = mwait_idle;
+               }
+               return;
+       }
+       pm_idle = xen_cpu_idle;
+       return;
+}
+
+static int __init idle_setup (char *str)
+{
+       if (!strncmp(str, "poll", 4)) {
+               printk("using polling idle threads.\n");
+               pm_idle = poll_idle;
+#ifdef CONFIG_X86_SMP
+               if (smp_num_siblings > 1)
+                       printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
+#endif
+       } else if (!strncmp(str, "halt", 4)) {
+               printk("using halt in idle threads.\n");
+               pm_idle = default_idle;
+       }
+
+       return 1;
+}
+
+__setup("idle=", idle_setup);
+
+void show_regs(struct pt_regs * regs)
+{
+       printk("\n");
+       printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
+       printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
+       print_symbol("EIP is at %s\n", regs->eip);
+
+       if (regs->xcs & 2)
+               printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
+       printk(" EFLAGS: %08lx    %s  (%s)\n",regs->eflags, print_tainted(),UTS_RELEASE);
+       printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+               regs->eax,regs->ebx,regs->ecx,regs->edx);
+       printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+               regs->esi, regs->edi, regs->ebp);
+       printk(" DS: %04x ES: %04x\n",
+               0xffff & regs->xds,0xffff & regs->xes);
+
+       show_trace(NULL, &regs->esp);
+}
+
+/*
+ * This gets run with %ebx containing the
+ * function to call, and %edx containing
+ * the "args".
+ */
+extern void kernel_thread_helper(void);
+__asm__(".section .text\n"
+       ".align 4\n"
+       "kernel_thread_helper:\n\t"
+       "movl %edx,%eax\n\t"
+       "pushl %edx\n\t"
+       "call *%ebx\n\t"
+       "pushl %eax\n\t"
+       "call do_exit\n"
+       ".previous");
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+       struct pt_regs regs;
+
+       memset(&regs, 0, sizeof(regs));
+
+       regs.ebx = (unsigned long) fn;
+       regs.edx = (unsigned long) arg;
+
+       regs.xds = __USER_DS;
+       regs.xes = __USER_DS;
+       regs.orig_eax = -1;
+       regs.eip = (unsigned long) kernel_thread_helper;
+       regs.xcs = __KERNEL_CS;
+       regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
+
+       /* Ok, create the new process.. */
+       return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
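+
+/*
+ * Illustrative call (worker_fn and data are hypothetical):
+ *
+ *     pid = kernel_thread(worker_fn, data, CLONE_FS | CLONE_FILES);
+ *
+ * kernel_thread_helper above then invokes worker_fn(data) and feeds
+ * its return value to do_exit().
+ */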
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+       struct task_struct *tsk = current;
+
+       /* The process may have allocated an io port bitmap... nuke it. */
+       if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
+               int cpu = get_cpu();
+               struct tss_struct *tss = init_tss + cpu;
+               kfree(tsk->thread.io_bitmap_ptr);
+               tsk->thread.io_bitmap_ptr = NULL;
+               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+               put_cpu();
+       }
+}
+
+void flush_thread(void)
+{
+       struct task_struct *tsk = current;
+
+       memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
+       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));        
+       /*
+        * Forget coprocessor state..
+        */
+       clear_fpu(tsk);
+       tsk->used_math = 0;
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+       if (dead_task->mm) {
+               // temporary debugging check
+               if (dead_task->mm->context.size) {
+                       printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
+                                       dead_task->comm,
+                                       dead_task->mm->context.ldt,
+                                       dead_task->mm->context.size);
+                       BUG();
+               }
+       }
+
+       release_x86_irqs(dead_task);
+}
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+       unlazy_fpu(tsk);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+       unsigned long unused,
+       struct task_struct * p, struct pt_regs * regs)
+{
+       struct pt_regs * childregs;
+       struct task_struct *tsk;
+       int err;
+
+       childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+       struct_cpy(childregs, regs);
+       childregs->eax = 0;
+       childregs->esp = esp;
+       p->set_child_tid = p->clear_child_tid = NULL;
+
+       p->thread.esp = (unsigned long) childregs;
+       p->thread.esp0 = (unsigned long) (childregs+1);
+
+       p->thread.eip = (unsigned long) ret_from_fork;
+
+       savesegment(fs,p->thread.fs);
+       savesegment(gs,p->thread.gs);
+
+       tsk = current;
+       if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
+               p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+               if (!p->thread.io_bitmap_ptr)
+                       return -ENOMEM;
+               memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
+                       IO_BITMAP_BYTES);
+       }
+
+       /*
+        * Set a new TLS for the child thread?
+        */
+       if (clone_flags & CLONE_SETTLS) {
+               struct desc_struct *desc;
+               struct user_desc info;
+               int idx;
+
+               err = -EFAULT;
+               if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
+                       goto out;
+               err = -EINVAL;
+               if (LDT_empty(&info))
+                       goto out;
+
+               idx = info.entry_number;
+               if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+                       goto out;
+
+               desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+               desc->a = LDT_entry_a(&info);
+               desc->b = LDT_entry_b(&info);
+       }
+
+       err = 0;
+ out:
+       if (err && p->thread.io_bitmap_ptr)
+               kfree(p->thread.io_bitmap_ptr);
+       return err;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+       int i;
+
+/* changed the size calculations - should hopefully work better. lbt */
+       dump->magic = CMAGIC;
+       dump->start_code = 0;
+       dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
+       dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+       dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+       dump->u_dsize -= dump->u_tsize;
+       dump->u_ssize = 0;
+       for (i = 0; i < 8; i++)
+               dump->u_debugreg[i] = current->thread.debugreg[i];  
+
+       if (dump->start_stack < TASK_SIZE)
+               dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+       dump->regs.ebx = regs->ebx;
+       dump->regs.ecx = regs->ecx;
+       dump->regs.edx = regs->edx;
+       dump->regs.esi = regs->esi;
+       dump->regs.edi = regs->edi;
+       dump->regs.ebp = regs->ebp;
+       dump->regs.eax = regs->eax;
+       dump->regs.ds = regs->xds;
+       dump->regs.es = regs->xes;
+       savesegment(fs,dump->regs.fs);
+       savesegment(gs,dump->regs.gs);
+       dump->regs.orig_eax = regs->orig_eax;
+       dump->regs.eip = regs->eip;
+       dump->regs.cs = regs->xcs;
+       dump->regs.eflags = regs->eflags;
+       dump->regs.esp = regs->esp;
+       dump->regs.ss = regs->xss;
+
+       dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+}
+
+/* 
+ * Capture the user space registers if the task is not running (in user space)
+ */
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+{
+       struct pt_regs ptregs;
+       
+       ptregs = *(struct pt_regs *)
+               ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
+       ptregs.xcs &= 0xffff;
+       ptregs.xds &= 0xffff;
+       ptregs.xes &= 0xffff;
+       ptregs.xss &= 0xffff;
+
+       elf_core_copy_regs(regs, &ptregs);
+
+       return 1;
+}
+
+/*
+ * This special macro can be used to load a debugging register
+ */
+#define loaddebug(thread,register) \
+               HYPERVISOR_set_debugreg((register),     \
+                       (thread->debugreg[register]))
+
+/*
+ *     switch_to(x,y) should switch tasks from x to y.
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process. Lazy FP saving no longer makes any sense
+ * with modern CPUs, and this simplifies a lot of things (SMP
+ * and UP become the same).
+ *
+ * NOTE! We used to use the x86 hardware context switching. The
+ * reason for not using it any more becomes apparent when you
+ * try to recover gracefully from saved state that is no longer
+ * valid (stale segment register values in particular). With the
+ * hardware task-switch, there is no way to fix up bad state in
+ * a reasonable manner.
+ *
+ * The fact that Intel documents the hardware task-switching to
+ * be slow is a fairly red herring - this code is not noticeably
+ * faster. However, there _is_ some room for improvement here,
+ * so the performance issues may eventually be a valid point.
+ * More important, however, is the fact that this allows us much
+ * more flexibility.
+ *
+ * The return value (in %eax) will be the "prev" task after
+ * the task-switch, and shows up in ret_from_fork in entry.S,
+ * for example.
+ */
+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+{
+       struct thread_struct *prev = &prev_p->thread,
+                                *next = &next_p->thread;
+       int cpu = smp_processor_id();
+       struct tss_struct *tss = init_tss + cpu;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       /*
+        * Save away %fs and %gs. No need to save %es and %ds, as
+        * those are always kernel segments while inside the kernel.
+        */
+       asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
+       asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+
+       /*
+        * We clobber FS and GS here so that we avoid a GPF when
+        * restoring previous task's FS/GS values in Xen when the LDT
+        * is switched. If we don't do this then we can end up
+        * erroneously re-flushing the page-update queue when we
+        * 'execute_multicall_list'.
+        */
+       __asm__ __volatile__ ( 
+               "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
+               "eax" );
+
+       flush_page_update_queue();
+
+       /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+       __unlazy_fpu(prev_p);
+
+       /*
+        * Reload esp0, LDT and the page table pointer:
+        */
+       load_esp0(tss, next);
+
+       /*
+        * Load the per-thread Thread-Local Storage descriptor.
+        */
+       load_TLS(next, cpu);
+
+       local_irq_restore(flags);
+
+       /*
+        * Restore %fs and %gs if needed.
+        */
+       if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) {
+               loadsegment(fs, next->fs);
+               loadsegment(gs, next->gs);
+       }
+
+       /*
+        * Now maybe reload the debug registers
+        */
+       if (unlikely(next->debugreg[7])) {
+               loaddebug(next, 0);
+               loaddebug(next, 1);
+               loaddebug(next, 2);
+               loaddebug(next, 3);
+               /* no 4 and 5 */
+               loaddebug(next, 6);
+               loaddebug(next, 7);
+       }
+
+       if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
+               if (next->io_bitmap_ptr) {
+                       /*
+                        * 4 cachelines copy ... not good, but not that
+                        * bad either. Anyone got something better?
+                        * This only affects processes which use ioperm().
+                        * [Putting the TSSs into 4k-tlb mapped regions
+                        * and playing VM tricks to switch the IO bitmap
+                        * is not really acceptable.]
+                        */
+                       memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+                               IO_BITMAP_BYTES);
+                       tss->io_bitmap_base = IO_BITMAP_OFFSET;
+               } else
+                       /*
+                        * a bitmap offset pointing outside of the TSS limit
+                        * causes a nicely controllable SIGSEGV if a process
+                        * tries to use a port IO instruction. The first
+                        * sys_ioperm() call sets up the bitmap properly.
+                        */
+                       tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+       }
+       return prev_p;
+}
+
+asmlinkage int sys_fork(struct pt_regs regs)
+{
+       return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+}
+
+asmlinkage int sys_clone(struct pt_regs regs)
+{
+       unsigned long clone_flags;
+       unsigned long newsp;
+       int __user *parent_tidptr, *child_tidptr;
+
+       clone_flags = regs.ebx;
+       newsp = regs.ecx;
+       parent_tidptr = (int __user *)regs.edx;
+       child_tidptr = (int __user *)regs.edi;
+       if (!newsp)
+               newsp = regs.esp;
+       return do_fork(clone_flags & ~CLONE_IDLETASK, newsp, &regs, 0, parent_tidptr, child_tidptr);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(struct pt_regs regs)
+{
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(struct pt_regs regs)
+{
+       int error;
+       char * filename;
+
+       filename = getname((char __user *) regs.ebx);
+       error = PTR_ERR(filename);
+       if (IS_ERR(filename))
+               goto out;
+       error = do_execve(filename,
+                       (char __user * __user *) regs.ecx,
+                       (char __user * __user *) regs.edx,
+                       &regs);
+       if (error == 0) {
+               current->ptrace &= ~PT_DTRACE;
+               /* Make sure we don't return using sysenter.. */
+               set_thread_flag(TIF_IRET);
+       }
+       putname(filename);
+out:
+       return error;
+}
+
+#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
+#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
+
+unsigned long get_wchan(struct task_struct *p)
+{
+       unsigned long ebp, esp, eip;
+       unsigned long stack_page;
+       int count = 0;
+       if (!p || p == current || p->state == TASK_RUNNING)
+               return 0;
+       stack_page = (unsigned long)p->thread_info;
+       esp = p->thread.esp;
+       if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
+               return 0;
+       /* include/asm-i386/system.h:switch_to() pushes ebp last. */
+       ebp = *(unsigned long *) esp;
+       do {
+               if (ebp < stack_page || ebp > top_ebp+stack_page)
+                       return 0;
+               eip = *(unsigned long *) (ebp+4);
+               if (!in_sched_functions(eip))
+                       return eip;
+               ebp = *(unsigned long *) ebp;
+       } while (count++ < 16);
+       return 0;
+}
+
+/*
+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+ */
+static int get_free_idx(void)
+{
+       struct thread_struct *t = &current->thread;
+       int idx;
+
+       for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+               if (desc_empty(t->tls_array + idx))
+                       return idx + GDT_ENTRY_TLS_MIN;
+       return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
+{
+       struct thread_struct *t = &current->thread;
+       struct user_desc info;
+       struct desc_struct *desc;
+       int cpu, idx;
+
+       if (copy_from_user(&info, u_info, sizeof(info)))
+               return -EFAULT;
+       idx = info.entry_number;
+
+       /*
+        * index -1 means the kernel should try to find and
+        * allocate an empty descriptor:
+        */
+       if (idx == -1) {
+               idx = get_free_idx();
+               if (idx < 0)
+                       return idx;
+               if (put_user(idx, &u_info->entry_number))
+                       return -EFAULT;
+       }
+
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+
+       desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+       /*
+        * We must not get preempted while modifying the TLS.
+        */
+       cpu = get_cpu();
+
+       if (LDT_empty(&info)) {
+               desc->a = 0;
+               desc->b = 0;
+       } else {
+               desc->a = LDT_entry_a(&info);
+               desc->b = LDT_entry_b(&info);
+       }
+       load_TLS(t, cpu);
+
+       put_cpu();
+
+       return 0;
+}
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
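+/*
+ * An i386 descriptor scatters the 32-bit base across bits 16-31 of
+ * the low word and bits 0-7 and 24-31 of the high word; the 20-bit
+ * limit is split between the two words as well.  These helpers
+ * reassemble the pieces.
+ */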
+#define GET_BASE(desc) ( \
+       (((desc)->a >> 16) & 0x0000ffff) | \
+       (((desc)->b << 16) & 0x00ff0000) | \
+       ( (desc)->b        & 0xff000000)   )
+
+#define GET_LIMIT(desc) ( \
+       ((desc)->a & 0x0ffff) | \
+        ((desc)->b & 0xf0000) )
+       
+#define GET_32BIT(desc)                (((desc)->b >> 22) & 1)
+#define GET_CONTENTS(desc)     (((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc)     (((desc)->b >>  9) & 1)
+#define GET_LIMIT_PAGES(desc)  (((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc)      (((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc)      (((desc)->b >> 20) & 1)
+
+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
+{
+       struct user_desc info;
+       struct desc_struct *desc;
+       int idx;
+
+       if (get_user(idx, &u_info->entry_number))
+               return -EFAULT;
+       if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+               return -EINVAL;
+
+       desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+       info.entry_number = idx;
+       info.base_addr = GET_BASE(desc);
+       info.limit = GET_LIMIT(desc);
+       info.seg_32bit = GET_32BIT(desc);
+       info.contents = GET_CONTENTS(desc);
+       info.read_exec_only = !GET_WRITABLE(desc);
+       info.limit_in_pages = GET_LIMIT_PAGES(desc);
+       info.seg_not_present = !GET_PRESENT(desc);
+       info.useable = GET_USEABLE(desc);
+
+       if (copy_to_user(u_info, &info, sizeof(info)))
+               return -EFAULT;
+       return 0;
+}
+
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/setup.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/setup.c
new file mode 100644 (file)
index 0000000..55a4a84
--- /dev/null
@@ -0,0 +1,1218 @@
+/*
+ *  linux/arch/i386/kernel/setup.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *
+ *  Memory region support
+ *     David Parsons <orc@pell.chi.il.us>, July-August 1999
+ *
+ *  Added E820 sanitization routine (removes overlapping memory regions);
+ *  Brian Moyle <bmoyle@mvista.com>, February 2001
+ *
+ * Moved CPU detection code to cpu/${cpu}.c
+ *    Patrick Mochel <mochel@osdl.org>, March 2002
+ *
+ *  Provisions for empty E820 memory regions (reported by certain BIOSes).
+ *  Alex Achenbach <xela@slit.de>, December 2002.
+ *
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+#include <linux/apm_bios.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/console.h>
+#include <linux/root_dev.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/edd.h>
+#include <video/edid.h>
+#include <asm/e820.h>
+#include <asm/mpspec.h>
+#include <asm/setup.h>
+#include <asm/arch_hooks.h>
+#include <asm/sections.h>
+#include <asm/io_apic.h>
+#include <asm/ist.h>
+#include <asm/std_resources.h>
+#include <asm/hypervisor.h>
+#include "setup_arch_pre.h"
+
+int disable_pse __initdata = 0;
+
+static inline char * __init machine_specific_memory_setup(void);
+
+/*
+ * Machine setup..
+ */
+
+#ifdef CONFIG_EFI
+int efi_enabled = 0;
+EXPORT_SYMBOL(efi_enabled);
+#endif
+
+/* cpu data as detected by the assembly code in head.S */
+struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+/* common cpu data for all cpus */
+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+
+unsigned long mmu_cr4_features;
+EXPORT_SYMBOL_GPL(mmu_cr4_features);
+
+#ifdef CONFIG_ACPI_INTERPRETER
+       int acpi_disabled = 0;
+#else
+       int acpi_disabled = 1;
+#endif
+EXPORT_SYMBOL(acpi_disabled);
+
+#ifdef CONFIG_ACPI_BOOT
+int __initdata acpi_force = 0;
+extern acpi_interrupt_flags    acpi_sci_flags;
+#endif
+
+int MCA_bus;
+/* for MCA, but anyone else can use it if they want */
+unsigned int machine_id;
+unsigned int machine_submodel_id;
+unsigned int BIOS_revision;
+unsigned int mca_pentium_flag;
+
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+
+/* user-defined highmem size */
+static unsigned int highmem_pages = -1;
+
+/*
+ * Setup options
+ */
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct apm_info apm_info;
+struct sys_desc_table_struct {
+       unsigned short length;
+       unsigned char table[0];
+};
+struct edid_info edid_info;
+struct ist_info ist_info;
+struct e820map e820;
+
+unsigned char aux_device_present;
+
+extern void early_cpu_init(void);
+extern void dmi_scan_machine(void);
+extern void generic_apic_probe(char *);
+extern int root_mountflags;
+
+unsigned long saved_videomode;
+
+#define RAMDISK_IMAGE_START_MASK       0x07FF
+#define RAMDISK_PROMPT_FLAG            0x8000
+#define RAMDISK_LOAD_FLAG              0x4000  
+
+static char command_line[COMMAND_LINE_SIZE];
+       char saved_command_line[COMMAND_LINE_SIZE];
+
+unsigned char __initdata boot_params[PARAM_SIZE];
+
+static struct resource code_resource = { "Kernel code", 0x100000, 0 };
+static struct resource data_resource = { "Kernel data", 0, 0 };
+
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+
+unsigned long *phys_to_machine_mapping;
+
+multicall_entry_t multicall_list[8];
+int nr_multicall_ents = 0;
+
+/* Raw start-of-day parameters from the hypervisor. */
+union start_info_union start_info_union;
+
+static void __init limit_regions(unsigned long long size)
+{
+       unsigned long long current_addr = 0;
+       int i;
+
+       if (efi_enabled) {
+               for (i = 0; i < memmap.nr_map; i++) {
+                       current_addr = memmap.map[i].phys_addr +
+                                      (memmap.map[i].num_pages << 12);
+                       if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
+                               if (current_addr >= size) {
+                                       memmap.map[i].num_pages -=
+                                               (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
+                                       memmap.nr_map = i + 1;
+                                       return;
+                               }
+                       }
+               }
+       }
+       for (i = 0; i < e820.nr_map; i++) {
+               if (e820.map[i].type == E820_RAM) {
+                       current_addr = e820.map[i].addr + e820.map[i].size;
+                       if (current_addr >= size) {
+                               e820.map[i].size -= current_addr-size;
+                               e820.nr_map = i + 1;
+                               return;
+                       }
+               }
+       }
+}
+
+static void __init add_memory_region(unsigned long long start,
+                                  unsigned long long size, int type)
+{
+       int x;
+
+       if (!efi_enabled) {
+               x = e820.nr_map;
+
+               if (x == E820MAX) {
+                       printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+                       return;
+               }
+
+               e820.map[x].addr = start;
+               e820.map[x].size = size;
+               e820.map[x].type = type;
+               e820.nr_map++;
+       }
+} /* add_memory_region */
+
+#define E820_DEBUG     1
+
+static void __init print_memory_map(char *who)
+{
+       int i;
+
+       for (i = 0; i < e820.nr_map; i++) {
+               printk(" %s: %016Lx - %016Lx ", who,
+                       e820.map[i].addr,
+                       e820.map[i].addr + e820.map[i].size);
+               switch (e820.map[i].type) {
+               case E820_RAM:  printk("(usable)\n");
+                               break;
+               case E820_RESERVED:
+                               printk("(reserved)\n");
+                               break;
+               case E820_ACPI:
+                               printk("(ACPI data)\n");
+                               break;
+               case E820_NVS:
+                               printk("(ACPI NVS)\n");
+                               break;
+               default:        printk("type %lu\n", e820.map[i].type);
+                               break;
+               }
+       }
+}
+
+#if 0
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries.  The following 
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+struct change_member {
+       struct e820entry *pbios; /* pointer to original bios entry */
+       unsigned long long addr; /* address for this change point */
+};
+struct change_member change_point_list[2*E820MAX] __initdata;
+struct change_member *change_point[2*E820MAX] __initdata;
+struct e820entry *overlap_list[E820MAX] __initdata;
+struct e820entry new_bios[E820MAX] __initdata;
+
+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+       struct change_member *change_tmp;
+       unsigned long current_type, last_type;
+       unsigned long long last_addr;
+       int chgidx, still_changing;
+       int overlap_entries;
+       int new_bios_entry;
+       int old_nr, new_nr, chg_nr;
+       int i;
+
+       /*
+               Visually we're performing the following (1,2,3,4 = memory types)...
+
+               Sample memory map (w/overlaps):
+                  ____22__________________
+                  ______________________4_
+                  ____1111________________
+                  _44_____________________
+                  11111111________________
+                  ____________________33__
+                  ___________44___________
+                  __________33333_________
+                  ______________22________
+                  ___________________2222_
+                  _________111111111______
+                  _____________________11_
+                  _________________4______
+
+               Sanitized equivalent (no overlap):
+                  1_______________________
+                  _44_____________________
+                  ___1____________________
+                  ____22__________________
+                  ______11________________
+                  _________1______________
+                  __________3_____________
+                  ___________44___________
+                  _____________33_________
+                  _______________2________
+                  ________________1_______
+                  _________________4______
+                  ___________________2____
+                  ____________________33__
+                  ______________________4_
+       */
+
+       /* if there's only one memory region, don't bother */
+       if (*pnr_map < 2)
+               return -1;
+
+       old_nr = *pnr_map;
+
+       /* bail out if we find any unreasonable addresses in bios map */
+       for (i=0; i<old_nr; i++)
+               if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+                       return -1;
+
+       /* create pointers for initial change-point information (for sorting) */
+       for (i=0; i < 2*old_nr; i++)
+               change_point[i] = &change_point_list[i];
+
+       /* record all known change-points (starting and ending addresses),
+          omitting those that are for empty memory regions */
+       chgidx = 0;
+       for (i=0; i < old_nr; i++)      {
+               if (biosmap[i].size != 0) {
+                       change_point[chgidx]->addr = biosmap[i].addr;
+                       change_point[chgidx++]->pbios = &biosmap[i];
+                       change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+                       change_point[chgidx++]->pbios = &biosmap[i];
+               }
+       }
+       chg_nr = chgidx;        /* true number of change-points */
+
+       /* sort change-point list by memory addresses (low -> high) */
+       still_changing = 1;
+       while (still_changing)  {
+               still_changing = 0;
+               for (i=1; i < chg_nr; i++)  {
+                       /* if <current_addr> > <last_addr>, swap */
+                       /* or, if current=<start_addr> & last=<end_addr>, swap */
+                       if ((change_point[i]->addr < change_point[i-1]->addr) ||
+                               ((change_point[i]->addr == change_point[i-1]->addr) &&
+                                (change_point[i]->addr == change_point[i]->pbios->addr) &&
+                                (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+                          )
+                       {
+                               change_tmp = change_point[i];
+                               change_point[i] = change_point[i-1];
+                               change_point[i-1] = change_tmp;
+                               still_changing=1;
+                       }
+               }
+       }
+
+       /* create a new bios memory map, removing overlaps */
+       overlap_entries=0;       /* number of entries in the overlap table */
+       new_bios_entry=0;        /* index for creating new bios map entries */
+       last_type = 0;           /* start with undefined memory type */
+       last_addr = 0;           /* start with 0 as last starting address */
+       /* loop through change-points, determining the effect on the new bios map */
+       for (chgidx=0; chgidx < chg_nr; chgidx++)
+       {
+               /* keep track of all overlapping bios entries */
+               if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+               {
+                       /* add map entry to overlap list (> 1 entry implies an overlap) */
+                       overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+               }
+               else
+               {
+                       /* remove entry from list (order independent, so swap with last) */
+                       for (i=0; i<overlap_entries; i++)
+                       {
+                               if (overlap_list[i] == change_point[chgidx]->pbios)
+                                       overlap_list[i] = overlap_list[overlap_entries-1];
+                       }
+                       overlap_entries--;
+               }
+               /* if there are overlapping entries, decide which "type" to use */
+               /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+               current_type = 0;
+               for (i=0; i<overlap_entries; i++)
+                       if (overlap_list[i]->type > current_type)
+                               current_type = overlap_list[i]->type;
+               /* continue building up new bios map based on this information */
+               if (current_type != last_type)  {
+                       if (last_type != 0)      {
+                               new_bios[new_bios_entry].size =
+                                       change_point[chgidx]->addr - last_addr;
+                               /* move forward only if the new size was non-zero */
+                               if (new_bios[new_bios_entry].size != 0)
+                                       if (++new_bios_entry >= E820MAX)
+                                               break;  /* no more space left for new bios entries */
+                       }
+                       if (current_type != 0)  {
+                               new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+                               new_bios[new_bios_entry].type = current_type;
+                               last_addr=change_point[chgidx]->addr;
+                       }
+                       last_type = current_type;
+               }
+       }
+       new_nr = new_bios_entry;   /* retain count for new bios entries */
+
+       /* copy new bios mapping into original location */
+       memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+       *pnr_map = new_nr;
+
+       return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory.  If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and almost every PC known to man has two memory
+ * regions: one from 0 to 640k and one from 1MB up.  (The IBM
+ * Thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+       /* Only one memory region (or negative)? Ignore it */
+       if (nr_map < 2)
+               return -1;
+
+       do {
+               unsigned long long start = biosmap->addr;
+               unsigned long long size = biosmap->size;
+               unsigned long long end = start + size;
+               unsigned long type = biosmap->type;
+
+               /* Overflow in 64 bits? Ignore the memory map. */
+               if (start > end)
+                       return -1;
+
+               /*
+                * Some BIOSes claim RAM in the 640k - 1M region.
+                * Not right. Fix it up.
+                */
+               if (type == E820_RAM) {
+                       if (start < 0x100000ULL && end > 0xA0000ULL) {
+                               if (start < 0xA0000ULL)
+                                       add_memory_region(start, 0xA0000ULL-start, type);
+                               if (end <= 0x100000ULL)
+                                       continue;
+                               start = 0x100000ULL;
+                               size = end - start;
+                       }
+               }
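+
+               /*
+                * Example of the fixup above: a bogus RAM entry covering
+                * [0, 1MB) is split into RAM [0, 640k), the 640k-1MB hole
+                * is skipped, and since nothing remains above 1MB the
+                * entry is finished off by the continue.
+                */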
+               add_memory_region(start, size, type);
+       } while (biosmap++,--nr_map);
+       return 0;
+}
+#endif
+
+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+unsigned char eddnr;
+struct edd_info edd[EDDMAXNR];
+unsigned int edd_disk80_sig;
+#ifdef CONFIG_EDD_MODULE
+EXPORT_SYMBOL(eddnr);
+EXPORT_SYMBOL(edd);
+EXPORT_SYMBOL(edd_disk80_sig);
+#endif
+/**
+ * copy_edd() - Copy the BIOS EDD information
+ *              from boot_params into a safe place.
+ *
+ */
+static inline void copy_edd(void)
+{
+     eddnr = EDD_NR;
+     memcpy(edd, EDD_BUF, sizeof(edd));
+     edd_disk80_sig = DISK80_SIGNATURE;
+}
+#else
+#define copy_edd() do {} while (0)
+#endif
+
+/*
+ * Do NOT EVER look at the BIOS memory size location.
+ * It does not work on many machines.
+ */
+#define LOWMEMSIZE()   (0x9f000)
+
+static void __init setup_memory_region(void)
+{
+       char *who = machine_specific_memory_setup();
+       printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+       print_memory_map(who);
+} /* setup_memory_region */
+
+
+static void __init parse_cmdline_early (char ** cmdline_p)
+{
+       char c = ' ', *to = command_line, *from = saved_command_line;
+       int len = 0;
+       int userdef = 0;
+
+       memcpy(saved_command_line, start_info.cmd_line, MAX_CMDLINE);
+       /* Save unparsed command line copy for /proc/cmdline */
+       saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+       for (;;) {
+               /*
+                * "mem=nopentium" disables the 4MB page tables.
+                * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
+                * to <mem>, overriding the bios size.
+                * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
+                * <start> to <start>+<mem>, overriding the bios size.
+                *
+                * HPA tells me bootloaders need to parse mem=, so no new
+                * option should be mem=  [also see Documentation/i386/boot.txt]
+                */
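+               /*
+                * Examples: "mem=512M" clips the e820 map at 512MB;
+                * "memmap=exactmap memmap=640K@0 memmap=511M@1M" discards
+                * the BIOS map and builds one from scratch.
+                */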
+               if (c == ' ' && !memcmp(from, "mem=", 4)) {
+                       if (to != command_line)
+                               to--;
+                       if (!memcmp(from+4, "nopentium", 9)) {
+                               from += 9+4;
+                               clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+                               disable_pse = 1;
+                       } else {
+                               /* If the user specifies memory size, we
+                                * limit the BIOS-provided memory map to
+                                * that size. exactmap can be used to specify
+                                * the exact map. mem=number can be used to
+                                * trim the existing memory map.
+                                */
+                               unsigned long long mem_size;
+                               mem_size = memparse(from+4, &from);
+                               limit_regions(mem_size);
+                               userdef=1;
+                       }
+               }
+
+               if (c == ' ' && !memcmp(from, "memmap=", 7)) {
+                       if (to != command_line)
+                               to--;
+                       if (!memcmp(from+7, "exactmap", 8)) {
+                               from += 8+7;
+                               e820.nr_map = 0;
+                               userdef = 1;
+                       } else {
+                               /* If the user specifies memory size, we
+                                * limit the BIOS-provided memory map to
+                                * that size. exactmap can be used to specify
+                                * the exact map. mem=number can be used to
+                                * trim the existing memory map.
+                                */
+                               unsigned long long start_at, mem_size;
+                               mem_size = memparse(from+7, &from);
+                               if (*from == '@') {
+                                       start_at = memparse(from+1, &from);
+                                       add_memory_region(start_at, mem_size, E820_RAM);
+                               } else if (*from == '#') {
+                                       start_at = memparse(from+1, &from);
+                                       add_memory_region(start_at, mem_size, E820_ACPI);
+                               } else if (*from == '$') {
+                                       start_at = memparse(from+1, &from);
+                                       add_memory_region(start_at, mem_size, E820_RESERVED);
+                               } else {
+                                       limit_regions(mem_size);
+                                       userdef=1;
+                               }
+                       }
+               }
+
+#ifdef  CONFIG_X86_SMP
+               /*
+                * If the BIOS enumerates physical processors before logical,
+                * maxcpus=N at enumeration-time can be used to disable HT.
+                */
+               else if (!memcmp(from, "maxcpus=", 8)) {
+                       extern unsigned int maxcpus;
+
+                       maxcpus = simple_strtoul(from + 8, NULL, 0);
+               }
+#endif
+
+#ifdef CONFIG_ACPI_BOOT
+               /* "acpi=off" disables both ACPI table parsing and interpreter */
+               else if (!memcmp(from, "acpi=off", 8)) {
+                       disable_acpi();
+               }
+
+               /* acpi=force to override the blacklist */
+               else if (!memcmp(from, "acpi=force", 10)) {
+                       acpi_force = 1;
+                       acpi_ht = 1;
+                       acpi_disabled = 0;
+               }
+
+               /* acpi=strict disables out-of-spec workarounds */
+               else if (!memcmp(from, "acpi=strict", 11)) {
+                       acpi_strict = 1;
+               }
+
+               /* Limit ACPI just to boot-time to enable HT */
+               else if (!memcmp(from, "acpi=ht", 7)) {
+                       if (!acpi_force)
+                               disable_acpi();
+                       acpi_ht = 1;
+               }
+               
+               /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
+               else if (!memcmp(from, "pci=noacpi", 10)) {
+                       acpi_disable_pci();
+               }
+               /* "acpi=noirq" disables ACPI interrupt routing */
+               else if (!memcmp(from, "acpi=noirq", 10)) {
+                       acpi_noirq_set();
+               }
+
+               else if (!memcmp(from, "acpi_sci=edge", 13))
+                       acpi_sci_flags.trigger =  1;
+
+               else if (!memcmp(from, "acpi_sci=level", 14))
+                       acpi_sci_flags.trigger = 3;
+
+               else if (!memcmp(from, "acpi_sci=high", 13))
+                       acpi_sci_flags.polarity = 1;
+
+               else if (!memcmp(from, "acpi_sci=low", 12))
+                       acpi_sci_flags.polarity = 3;
+
+#ifdef CONFIG_X86_IO_APIC
+               else if (!memcmp(from, "acpi_skip_timer_override", 24))
+                       acpi_skip_timer_override = 1;
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+               /* disable IO-APIC */
+               else if (!memcmp(from, "noapic", 6))
+                       disable_ioapic_setup();
+#endif /* CONFIG_X86_LOCAL_APIC */
+#endif /* CONFIG_ACPI_BOOT */
+
+               /*
+                * highmem=size forces highmem to be exactly 'size' bytes.
+                * This works even on boxes that have no highmem otherwise.
+                * This also works to reduce highmem size on bigger boxes.
+                */
+               if (c == ' ' && !memcmp(from, "highmem=", 8))
+                       highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
+       
+               c = *(from++);
+               if (!c)
+                       break;
+               if (COMMAND_LINE_SIZE <= ++len)
+                       break;
+               *(to++) = c;
+       }
+       *to = '\0';
+       *cmdline_p = command_line;
+       if (userdef) {
+               printk(KERN_INFO "user-defined physical RAM map:\n");
+               print_memory_map("user");
+       }
+}
+
+/*
+ * Callback for efi_memory_walk.
+ */
+static int __init
+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long *max_pfn = arg, pfn;
+
+       if (start < end) {
+               pfn = PFN_UP(end - 1);
+               if (pfn > *max_pfn)
+                       *max_pfn = pfn;
+       }
+       return 0;
+}
+
+
+/*
+ * Find the highest page frame number we have available
+ */
+void __init find_max_pfn(void)
+{
+       int i;
+
+       max_pfn = 0;
+       if (efi_enabled) {
+               efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+               return;
+       }
+
+       for (i = 0; i < e820.nr_map; i++) {
+               unsigned long start, end;
+               /* RAM? */
+               if (e820.map[i].type != E820_RAM)
+                       continue;
+               start = PFN_UP(e820.map[i].addr);
+               end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
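+               /*
+                * PFN_UP/PFN_DOWN trim the region to whole pages, e.g. a
+                * RAM entry at 0 of size 0x9fc00 gives end = 0x9f: the
+                * partial page above 0x9f000 is rounded away.
+                */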
+               if (start >= end)
+                       continue;
+               if (end > max_pfn)
+                       max_pfn = end;
+       }
+}
+
+/*
+ * Determine low and high memory ranges:
+ */
+unsigned long __init find_max_low_pfn(void)
+{
+       unsigned long max_low_pfn;
+
+       max_low_pfn = max_pfn;
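+       /*
+        * Everything above MAXMEM_PFN lies beyond the kernel's direct
+        * mapping and is only reachable as highmem.
+        */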
+       if (max_low_pfn > MAXMEM_PFN) {
+               if (highmem_pages == -1)
+                       highmem_pages = max_pfn - MAXMEM_PFN;
+               if (highmem_pages + MAXMEM_PFN < max_pfn)
+                       max_pfn = MAXMEM_PFN + highmem_pages;
+               if (highmem_pages + MAXMEM_PFN > max_pfn) {
+                       printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
+                       highmem_pages = 0;
+               }
+               max_low_pfn = MAXMEM_PFN;
+#ifndef CONFIG_HIGHMEM
+               /* Maximum memory usable is what is directly addressable */
+               printk(KERN_WARNING "Warning only %ldMB will be used.\n",
+                                       MAXMEM>>20);
+               if (max_pfn > MAX_NONPAE_PFN)
+                       printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+               else
+                       printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+               max_pfn = MAXMEM_PFN;
+#else /* !CONFIG_HIGHMEM */
+#ifndef CONFIG_X86_PAE
+               if (max_pfn > MAX_NONPAE_PFN) {
+                       max_pfn = MAX_NONPAE_PFN;
+                       printk(KERN_WARNING "Warning only 4GB will be used.\n");
+                       printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+               }
+#endif /* !CONFIG_X86_PAE */
+#endif /* !CONFIG_HIGHMEM */
+       } else {
+               if (highmem_pages == -1)
+                       highmem_pages = 0;
+#ifdef CONFIG_HIGHMEM
+               if (highmem_pages >= max_pfn) {
+                       printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
+                       highmem_pages = 0;
+               }
+               if (highmem_pages) {
+                       if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
+                               printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
+                               highmem_pages = 0;
+                       }
+                       max_low_pfn -= highmem_pages;
+               }
+#else
+               if (highmem_pages)
+                       printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
+#endif
+       }
+       return max_low_pfn;
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+
+/*
+ * Free all available memory for boot time allocation.  Used
+ * as a callback function by efi_memory_walk()
+ */
+
+static int __init
+free_available_memory(unsigned long start, unsigned long end, void *arg)
+{
+       /* check max_low_pfn */
+       if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
+               return 0;
+       if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
+               end = (max_low_pfn + 1) << PAGE_SHIFT;
+       if (start < end)
+               free_bootmem(start, end - start);
+
+       return 0;
+}
+/*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+{
+       int i;
+
+       if (efi_enabled) {
+               efi_memmap_walk(free_available_memory, NULL);
+               return;
+       }
+       for (i = 0; i < e820.nr_map; i++) {
+               unsigned long curr_pfn, last_pfn, size;
+               /*
+                * Reserve usable low memory
+                */
+               if (e820.map[i].type != E820_RAM)
+                       continue;
+               /*
+                * We are rounding up the start address of usable memory:
+                */
+               curr_pfn = PFN_UP(e820.map[i].addr);
+               if (curr_pfn >= max_low_pfn)
+                       continue;
+               /*
+                * ... and at the end of the usable range downwards:
+                */
+               last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
+               if (last_pfn > max_low_pfn)
+                       last_pfn = max_low_pfn;
+
+               /*
+                * .. finally, did all the rounding and playing
+                * around just make the area go away?
+                */
+               if (last_pfn <= curr_pfn)
+                       continue;
+
+               size = last_pfn - curr_pfn;
+               free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+       }
+}
+
+static unsigned long __init setup_memory(void)
+{
+       unsigned long bootmap_size, start_pfn, max_low_pfn;
+
+       /*
+        * partially used pages are not usable - thus
+        * we are rounding upwards:
+        */
+       start_pfn = PFN_UP(__pa(start_info.pt_base)) + start_info.nr_pt_frames;
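+
+       /*
+        * Under Xen, start_info.pt_base points at the initial page
+        * tables set up by the domain builder and nr_pt_frames gives
+        * their length, so the first frame free for bootmem lies just
+        * past them.
+        */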
+
+       find_max_pfn();
+
+       max_low_pfn = find_max_low_pfn();
+
+#ifdef CONFIG_HIGHMEM
+       highstart_pfn = highend_pfn = max_pfn;
+       if (max_pfn > max_low_pfn) {
+               highstart_pfn = max_low_pfn;
+       }
+       printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+               pages_to_mb(highend_pfn - highstart_pfn));
+#endif
+       printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
+                       pages_to_mb(max_low_pfn));
+       /*
+        * Initialize the boot-time allocator (with low memory only):
+        */
+       bootmap_size = init_bootmem(start_pfn, max_low_pfn);
+
+       register_bootmem_low_pages(max_low_pfn);
+
+       /*
+        * Reserve the bootmem bitmap itself as well. We do this in two
+        * steps (first step was init_bootmem()) because this catches
+        * the (very unlikely) case of us accidentally initializing the
+        * bootmem allocator with an invalid RAM area.
+        */
+       reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
+                        bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
+
+       /* Could be an AMD 768MPX chipset. Reserve a page before VGA to
+          prevent PCI prefetch into it (errata #56). Usually the page is
+          reserved anyway, unless no PS/2 mouse is plugged in. */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+           boot_cpu_data.x86 == 6)
+               reserve_bootmem(0xa0000 - 4096, 4096);
+
+#ifdef CONFIG_SMP
+       /*
+        * But first pinch a few for the stack/trampoline stuff
+        * FIXME: Don't need the extra page at 4K, but need to fix
+        * trampoline before removing it. (see the GDT stuff)
+        */
+       reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
+#endif
+#ifdef CONFIG_ACPI_SLEEP
+       /*
+        * Reserve low memory region for sleep support.
+        */
+       acpi_reserve_bootmem();
+#endif
+#ifdef CONFIG_X86_FIND_SMP_CONFIG
+       /*
+        * Find and reserve possible boot-time SMP configuration:
+        */
+       find_smp_config();
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (LOADER_TYPE && INITRD_START) {
+               if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
+                       reserve_bootmem(INITRD_START, INITRD_SIZE);
+                       initrd_start =
+                               INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+                       initrd_end = initrd_start+INITRD_SIZE;
+               }
+               else {
+                       printk(KERN_ERR "initrd extends beyond end of memory "
+                           "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+                           INITRD_START + INITRD_SIZE,
+                           max_low_pfn << PAGE_SHIFT);
+                       initrd_start = 0;
+               }
+       }
+#endif
+
+       phys_to_machine_mapping = (unsigned long *)start_info.mfn_list;
+
+       return max_low_pfn;
+}
+#else
+extern unsigned long setup_memory(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+/*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+static void __init
+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+{
+       int i;
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+       probe_roms();
+#endif
+       for (i = 0; i < e820.nr_map; i++) {
+               struct resource *res;
+               if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+                       continue;
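+               /*
+                * (Entries past 4GB are skipped above: resource start/end
+                * are unsigned long, only 32 bits wide on i386.)
+                */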
+               res = alloc_bootmem_low(sizeof(struct resource));
+               switch (e820.map[i].type) {
+               case E820_RAM:  res->name = "System RAM"; break;
+               case E820_ACPI: res->name = "ACPI Tables"; break;
+               case E820_NVS:  res->name = "ACPI Non-volatile Storage"; break;
+               default:        res->name = "reserved";
+               }
+               res->start = e820.map[i].addr;
+               res->end = res->start + e820.map[i].size - 1;
+               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               request_resource(&iomem_resource, res);
+               if (e820.map[i].type == E820_RAM) {
+                       /*
+                        *  We don't know which RAM region contains kernel data,
+                        *  so we try it repeatedly and let the resource manager
+                        *  test it.
+                        */
+                       request_resource(res, code_resource);
+                       request_resource(res, data_resource);
+               }
+       }
+}
+
+/*
+ * Request address space for all standard resources
+ */
+static void __init register_memory(unsigned long max_low_pfn)
+{
+       unsigned long low_mem_size;
+
+       if (efi_enabled)
+               efi_initialize_iomem_resources(&code_resource, &data_resource);
+       else
+               legacy_init_iomem_resources(&code_resource, &data_resource);
+
+       /* EFI systems may still have VGA */
+       request_graphics_resource();
+
+       /* request I/O space for devices used on all i[345]86 PCs */
+       request_standard_io_resources();
+
+       /* Tell the PCI layer not to allocate too close to the RAM area.. */
+       low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
+       if (low_mem_size > pci_mem_start)
+               pci_mem_start = low_mem_size;
+}
+
+/* Use inline assembly to define this because the nops are defined
+   as inline assembly strings in the include files and we cannot
+   easily get them into C strings. */
+asm("\t.data\nintelnops: " 
+    GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
+    GENERIC_NOP7 GENERIC_NOP8); 
+asm("\t.data\nk8nops: " 
+    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+    K8_NOP7 K8_NOP8); 
+asm("\t.data\nk7nops: " 
+    K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+    K7_NOP7 K7_NOP8); 
+    
+extern unsigned char intelnops[], k8nops[], k7nops[];
+static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 
+     NULL,
+     intelnops,
+     intelnops + 1,
+     intelnops + 1 + 2,
+     intelnops + 1 + 2 + 3,
+     intelnops + 1 + 2 + 3 + 4,
+     intelnops + 1 + 2 + 3 + 4 + 5,
+     intelnops + 1 + 2 + 3 + 4 + 5 + 6,
+     intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+}; 
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
+     NULL,
+     k8nops,
+     k8nops + 1,
+     k8nops + 1 + 2,
+     k8nops + 1 + 2 + 3,
+     k8nops + 1 + 2 + 3 + 4,
+     k8nops + 1 + 2 + 3 + 4 + 5,
+     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+}; 
+static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 
+     NULL,
+     k7nops,
+     k7nops + 1,
+     k7nops + 1 + 2,
+     k7nops + 1 + 2 + 3,
+     k7nops + 1 + 2 + 3 + 4,
+     k7nops + 1 + 2 + 3 + 4 + 5,
+     k7nops + 1 + 2 + 3 + 4 + 5 + 6,
+     k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+}; 
+static struct nop { 
+     int cpuid; 
+     unsigned char **noptable; 
+} noptypes[] = { 
+     { X86_FEATURE_K8, k8_nops }, 
+     { X86_FEATURE_K7, k7_nops }, 
+     { -1, 0 }
+}; 
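+
+/* Each nop table is laid out so that entry k points at a k-byte nop:
+   entry 1 at offset 0, entry 2 at offset 1, entry 3 at offset 1+2,
+   and so on up to ASM_NOP_MAX (8) bytes. */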
+
+/* Replace instructions with better alternatives for this CPU type.
+
+   This runs before SMP is initialized to avoid SMP problems with
+   self-modifying code. This implies that asymmetric systems where
+   APs have fewer capabilities than the boot processor are not
+   handled. In that case boot with "noreplacement". */
+void apply_alternatives(void *start, void *end) 
+{ 
+       struct alt_instr *a; 
+       int diff, i, k;
+       unsigned char **noptable = intel_nops;
+       for (i = 0; noptypes[i].cpuid >= 0; i++) { 
+               if (boot_cpu_has(noptypes[i].cpuid)) { 
+                       noptable = noptypes[i].noptable;
+                       break;
+               }
+       } 
+       for (a = start; (void *)a < end; a++) { 
+               if (!boot_cpu_has(a->cpuid))
+                       continue;
+               BUG_ON(a->replacementlen > a->instrlen); 
+               memcpy(a->instr, a->replacement, a->replacementlen); 
+               diff = a->instrlen - a->replacementlen; 
+               /* Pad the rest with nops */
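+               /* e.g. an 11-byte hole becomes one 8-byte nop followed
+                  by one 3-byte nop */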
+               for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+                       k = diff;
+                       if (k > ASM_NOP_MAX)
+                               k = ASM_NOP_MAX;
+                       memcpy(a->instr + i, noptable[k], k); 
+               } 
+       }
+} 
+
+static int no_replacement __initdata = 0; 
+void __init alternative_instructions(void)
+{
+       extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+       if (no_replacement) 
+               return;
+       apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
+
+static int __init noreplacement_setup(char *s)
+{ 
+     no_replacement = 1; 
+     return 0; 
+} 
+
+__setup("noreplacement", noreplacement_setup); 
+
+/*
+ * Determine if we were loaded by an EFI loader.  If so, then we have also been
+ * passed the efi memmap, systab, etc., so we should use these data structures
+ * for initialization.  Note, the efi init code path is determined by the
+ * global efi_enabled. This allows the same kernel image to be used on existing
+ * systems (with a traditional BIOS) as well as on EFI systems.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+       unsigned long max_low_pfn;
+
+       memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
+       early_cpu_init();
+
+       /*
+        * FIXME: This isn't an official loader_type right
+        * now but does currently work with elilo.
+        * If we were configured as an EFI kernel, check to make
+        * sure that we were loaded correctly from elilo and that
+        * the system table is valid.  If not, then initialize normally.
+        */
+#ifdef CONFIG_EFI
+       if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
+               efi_enabled = 1;
+#endif
+
+       ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+       drive_info = DRIVE_INFO;
+       screen_info = SCREEN_INFO;
+       edid_info = EDID_INFO;
+       apm_info.bios = APM_BIOS_INFO;
+       ist_info = IST_INFO;
+       saved_videomode = VIDEO_MODE;
+       if (SYS_DESC_TABLE.length != 0) {
+               MCA_bus = SYS_DESC_TABLE.table[3] & 0x2;
+               machine_id = SYS_DESC_TABLE.table[0];
+               machine_submodel_id = SYS_DESC_TABLE.table[1];
+               BIOS_revision = SYS_DESC_TABLE.table[2];
+       }
+       aux_device_present = AUX_DEVICE_INFO;
+
+#ifdef CONFIG_BLK_DEV_RAM
+       rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+       rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+       rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+       ARCH_SETUP
+       if (efi_enabled)
+               efi_init();
+       else
+               setup_memory_region();
+
+       copy_edd();
+
+       if (!MOUNT_ROOT_RDONLY)
+               root_mountflags &= ~MS_RDONLY;
+       init_mm.start_code = (unsigned long) _text;
+       init_mm.end_code = (unsigned long) _etext;
+       init_mm.end_data = (unsigned long) _edata;
+       init_mm.brk = (PFN_UP(__pa(start_info.pt_base)) + start_info.nr_pt_frames) << PAGE_SHIFT;
+
+       code_resource.start = virt_to_phys(_text);
+       code_resource.end = virt_to_phys(_etext)-1;
+       data_resource.start = virt_to_phys(_etext);
+       data_resource.end = virt_to_phys(_edata)-1;
+
+       parse_cmdline_early(cmdline_p);
+
+       max_low_pfn = setup_memory();
+
+       /*
+        * NOTE: before this point _nobody_ is allowed to allocate
+        * any memory using the bootmem allocator.
+        */
+
+#ifdef CONFIG_SMP
+       smp_alloc_memory(); /* AP processor realmode stacks in low memory */
+#endif
+       paging_init();
+
+#ifdef CONFIG_EARLY_PRINTK
+       {
+               char *s = strstr(*cmdline_p, "earlyprintk=");
+               if (s) {
+                       extern void setup_early_printk(char *);
+
+                       setup_early_printk(s);
+                       printk("early console enabled\n");
+               }
+       }
+#endif
+
+
+       dmi_scan_machine();
+
+#ifdef CONFIG_X86_GENERICARCH
+       generic_apic_probe(*cmdline_p);
+#endif 
+       if (efi_enabled)
+               efi_map_memmap();
+
+       /*
+        * Parse the ACPI tables for possible boot-time SMP configuration.
+        */
+       acpi_boot_init();
+
+#ifdef CONFIG_X86_LOCAL_APIC
+       if (smp_found_config)
+               get_smp_config();
+#endif
+
+       register_memory(max_low_pfn);
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+       if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+               conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+       conswitchp = &dummy_con;
+#endif
+#endif
+}
+
+#include "setup_arch_post.h"
+/*
+ * Local Variables:
+ * mode:c
+ * c-file-style:"k&r"
+ * c-basic-offset:8
+ * End:
+ */
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/signal.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/signal.c
new file mode 100644 (file)
index 0000000..4b516bb
--- /dev/null
@@ -0,0 +1,624 @@
+/*
+ *  linux/arch/i386/kernel/signal.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/suspend.h>
+#include <linux/elf.h>
+#include <asm/processor.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
+#include "sigframe.h"
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(int history0, int history1, old_sigset_t mask)
+{
+       struct pt_regs * regs = (struct pt_regs *) &history0;
+       sigset_t saveset;
+
+       mask &= _BLOCKABLE;
+       spin_lock_irq(&current->sighand->siglock);
+       saveset = current->blocked;
+       siginitset(&current->blocked, mask);
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       regs->eax = -EINTR;
+       while (1) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+               if (do_signal(regs, &saveset))
+                       return -EINTR;
+       }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+{
+       struct pt_regs * regs = (struct pt_regs *) &unewset;
+       sigset_t saveset, newset;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (copy_from_user(&newset, unewset, sizeof(newset)))
+               return -EFAULT;
+       sigdelsetmask(&newset, ~_BLOCKABLE);
+
+       spin_lock_irq(&current->sighand->siglock);
+       saveset = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       regs->eax = -EINTR;
+       while (1) {
+               current->state = TASK_INTERRUPTIBLE;
+               schedule();
+               if (do_signal(regs, &saveset))
+                       return -EINTR;
+       }
+}
+
+asmlinkage int 
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+             struct old_sigaction __user *oact)
+{
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+
+       if (act) {
+               old_sigset_t mask;
+               if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                       return -EFAULT;
+               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+               __get_user(mask, &act->sa_mask);
+               siginitset(&new_ka.sa.sa_mask, mask);
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+       if (!ret && oact) {
+               if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                       return -EFAULT;
+               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+       }
+
+       return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
+{
+       struct pt_regs *regs = (struct pt_regs *) &uss;
+       return do_sigaltstack(uss, uoss, regs->esp);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
+{
+       unsigned int err = 0;
+
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+#define COPY(x)                err |= __get_user(regs->x, &sc->x)
+
+#define COPY_SEG(seg)                                                  \
+       { unsigned short tmp;                                           \
+         err |= __get_user(tmp, &sc->seg);                             \
+         regs->x##seg = tmp; }
+
+#define COPY_SEG_STRICT(seg)                                           \
+       { unsigned short tmp;                                           \
+         err |= __get_user(tmp, &sc->seg);                             \
+         regs->x##seg = tmp|3; }
+
+#define GET_SEG(seg)                                                   \
+       { unsigned short tmp;                                           \
+         err |= __get_user(tmp, &sc->seg);                             \
+         loadsegment(seg,tmp); }
+
+#define        FIX_EFLAGS      (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \
+                        X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
+                        X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
+
+       GET_SEG(gs);
+       GET_SEG(fs);
+       COPY_SEG(es);
+       COPY_SEG(ds);
+       COPY(edi);
+       COPY(esi);
+       COPY(ebp);
+       COPY(esp);
+       COPY(ebx);
+       COPY(edx);
+       COPY(ecx);
+       COPY(eip);
+       COPY_SEG_STRICT(cs);
+       COPY_SEG_STRICT(ss);
+       
+       {
+               unsigned int tmpflags;
+               err |= __get_user(tmpflags, &sc->eflags);
+               regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+               regs->orig_eax = -1;            /* disable syscall checks */
+       }
+
+       {
+               struct _fpstate __user * buf;
+               err |= __get_user(buf, &sc->fpstate);
+               if (buf) {
+                       if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+                               goto badframe;
+                       err |= restore_i387(buf);
+               }
+       }
+
+       err |= __get_user(*peax, &sc->eax);
+       return err;
+
+badframe:
+       return 1;
+}
+
+asmlinkage int sys_sigreturn(unsigned long __unused)
+{
+       struct pt_regs *regs = (struct pt_regs *) &__unused;
+       struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
+       sigset_t set;
+       int eax;
+
+       if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__get_user(set.sig[0], &frame->sc.oldmask)
+           || (_NSIG_WORDS > 1
+               && __copy_from_user(&set.sig[1], &frame->extramask,
+                                   sizeof(frame->extramask))))
+               goto badframe;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+       
+       if (restore_sigcontext(regs, &frame->sc, &eax))
+               goto badframe;
+       return eax;
+
+badframe:
+       force_sig(SIGSEGV, current);
+       return 0;
+}      
+
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
+{
+       struct pt_regs *regs = (struct pt_regs *) &__unused;
+       struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
+       sigset_t set;
+       int eax;
+
+       if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+               goto badframe;
+
+       sigdelsetmask(&set, ~_BLOCKABLE);
+       spin_lock_irq(&current->sighand->siglock);
+       current->blocked = set;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+       
+       if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
+               goto badframe;
+
+       if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
+               goto badframe;
+
+       return eax;
+
+badframe:
+       force_sig(SIGSEGV, current);
+       return 0;
+}      
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
+                struct pt_regs *regs, unsigned long mask)
+{
+       int tmp, err = 0;
+
+       tmp = 0;
+       __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
+       err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+       __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
+       err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
+
+       err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
+       err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
+       err |= __put_user(regs->edi, &sc->edi);
+       err |= __put_user(regs->esi, &sc->esi);
+       err |= __put_user(regs->ebp, &sc->ebp);
+       err |= __put_user(regs->esp, &sc->esp);
+       err |= __put_user(regs->ebx, &sc->ebx);
+       err |= __put_user(regs->edx, &sc->edx);
+       err |= __put_user(regs->ecx, &sc->ecx);
+       err |= __put_user(regs->eax, &sc->eax);
+       err |= __put_user(current->thread.trap_no, &sc->trapno);
+       err |= __put_user(current->thread.error_code, &sc->err);
+       err |= __put_user(regs->eip, &sc->eip);
+       err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
+       err |= __put_user(regs->eflags, &sc->eflags);
+       err |= __put_user(regs->esp, &sc->esp_at_signal);
+       err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
+
+       tmp = save_i387(fpstate);
+       if (tmp < 0)
+               err = 1;
+       else
+               err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
+
+       /* non-iBCS2 extensions.. */
+       err |= __put_user(mask, &sc->oldmask);
+       err |= __put_user(current->thread.cr2, &sc->cr2);
+
+       return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+       unsigned long esp;
+
+       /* Default to using normal stack */
+       esp = regs->esp;
+
+       /* This is the X/Open sanctioned signal stack switching.  */
+       if (ka->sa.sa_flags & SA_ONSTACK) {
+               if (sas_ss_flags(esp) == 0)
+                       esp = current->sas_ss_sp + current->sas_ss_size;
+       }
+
+       /* This is the legacy signal stack switching. */
+       else if ((regs->xss & 0xffff) != __USER_DS &&
+                !(ka->sa.sa_flags & SA_RESTORER) &&
+                ka->sa.sa_restorer) {
+               esp = (unsigned long) ka->sa.sa_restorer;
+       }
+
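+       /* Drop below the frame and round down to an 8-byte boundary. */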
+       return (void __user *)((esp - frame_size) & -8ul);
+}
+
+/* These symbols are defined with the addresses in the vsyscall page.
+   See vsyscall-sigreturn.S.  */
+extern void __kernel_sigreturn, __kernel_rt_sigreturn;
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+                       sigset_t *set, struct pt_regs * regs)
+{
+       void *restorer;
+       struct sigframe __user *frame;
+       int err = 0;
+
+       frame = get_sigframe(ka, regs, sizeof(*frame));
+
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               goto give_sigsegv;
+
+       err |= __put_user((current_thread_info()->exec_domain
+                          && current_thread_info()->exec_domain->signal_invmap
+                          && sig < 32
+                          ? current_thread_info()->exec_domain->signal_invmap[sig]
+                          : sig),
+                         &frame->sig);
+       if (err)
+               goto give_sigsegv;
+
+       err |= setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
+       if (err)
+               goto give_sigsegv;
+
+       if (_NSIG_WORDS > 1) {
+               err |= __copy_to_user(&frame->extramask, &set->sig[1],
+                                     sizeof(frame->extramask));
+       }
+       if (err)
+               goto give_sigsegv;
+
+       restorer = &__kernel_sigreturn;
+       if (ka->sa.sa_flags & SA_RESTORER)
+               restorer = ka->sa.sa_restorer;
+
+       /* Set up to return from userspace.  */
+       err |= __put_user(restorer, &frame->pretcode);
+        
+       /*
+        * This is popl %eax ; movl $,%eax ; int $0x80
+        *
+        * WE DO NOT USE IT ANY MORE! It's only left here for historical
+        * reasons and because gdb uses it as a signature to notice
+        * signal handler stack frames.
+        */
+       err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
+       err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
+       err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
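+       /*
+        * Byte by byte (little endian): 0x58 = popl %eax, 0xb8 imm32 =
+        * movl $__NR_sigreturn,%eax, 0xcd 0x80 = int $0x80.
+        */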
+
+       if (err)
+               goto give_sigsegv;
+
+       /* Set up registers for signal handler */
+       regs->esp = (unsigned long) frame;
+       regs->eip = (unsigned long) ka->sa.sa_handler;
+
+       set_fs(USER_DS);
+       regs->xds = __USER_DS;
+       regs->xes = __USER_DS;
+       regs->xss = __USER_DS;
+       regs->xcs = __USER_CS;
+       regs->eflags &= ~TF_MASK;
+
+#if DEBUG_SIG
+       printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+               current->comm, current->pid, frame, regs->eip, frame->pretcode);
+#endif
+
+       return;
+
+give_sigsegv:
+       if (sig == SIGSEGV)
+               ka->sa.sa_handler = SIG_DFL;
+       force_sig(SIGSEGV, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+                          sigset_t *set, struct pt_regs * regs)
+{
+       void *restorer;
+       struct rt_sigframe __user *frame;
+       int err = 0;
+
+       frame = get_sigframe(ka, regs, sizeof(*frame));
+
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               goto give_sigsegv;
+
+       err |= __put_user((current_thread_info()->exec_domain
+                          && current_thread_info()->exec_domain->signal_invmap
+                          && sig < 32
+                          ? current_thread_info()->exec_domain->signal_invmap[sig]
+                          : sig),
+                         &frame->sig);
+       err |= __put_user(&frame->info, &frame->pinfo);
+       err |= __put_user(&frame->uc, &frame->puc);
+       err |= copy_siginfo_to_user(&frame->info, info);
+       if (err)
+               goto give_sigsegv;
+
+       /* Create the ucontext.  */
+       err |= __put_user(0, &frame->uc.uc_flags);
+       err |= __put_user(0, &frame->uc.uc_link);
+       err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+       err |= __put_user(sas_ss_flags(regs->esp),
+                         &frame->uc.uc_stack.ss_flags);
+       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+       err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
+                               regs, set->sig[0]);
+       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+       if (err)
+               goto give_sigsegv;
+
+       /* Set up to return from userspace.  */
+       restorer = &__kernel_rt_sigreturn;
+       if (ka->sa.sa_flags & SA_RESTORER)
+               restorer = ka->sa.sa_restorer;
+       err |= __put_user(restorer, &frame->pretcode);
+        
+       /*
+        * This is movl $,%eax ; int $0x80
+        *
+        * WE DO NOT USE IT ANY MORE! It's only left here for historical
+        * reasons and because gdb uses it as a signature to notice
+        * signal handler stack frames.
+        */
+       err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
+       err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
+       err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
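+       /*
+        * Byte by byte: 0xb8 imm32 = movl $__NR_rt_sigreturn,%eax at
+        * retcode+0, then 0xcd 0x80 = int $0x80 at retcode+5.
+        */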
+
+       if (err)
+               goto give_sigsegv;
+
+       /* Set up registers for signal handler */
+       regs->esp = (unsigned long) frame;
+       regs->eip = (unsigned long) ka->sa.sa_handler;
+
+       set_fs(USER_DS);
+       regs->xds = __USER_DS;
+       regs->xes = __USER_DS;
+       regs->xss = __USER_DS;
+       regs->xcs = __USER_CS;
+       regs->eflags &= ~TF_MASK;
+
+#if DEBUG_SIG
+       printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
+               current->comm, current->pid, frame, regs->eip, frame->pretcode);
+#endif
+
+       return;
+
+give_sigsegv:
+       if (sig == SIGSEGV)
+               ka->sa.sa_handler = SIG_DFL;
+       force_sig(SIGSEGV, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */    
+
+static void
+handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
+       struct pt_regs * regs)
+{
+       struct k_sigaction *ka = &current->sighand->action[sig-1];
+
+       /* Are we from a system call? */
+       if (regs->orig_eax >= 0) {
+               /* If so, check system call restarting.. */
+               switch (regs->eax) {
+                       case -ERESTART_RESTARTBLOCK:
+                       case -ERESTARTNOHAND:
+                               regs->eax = -EINTR;
+                               break;
+
+                       case -ERESTARTSYS:
+                               if (!(ka->sa.sa_flags & SA_RESTART)) {
+                                       regs->eax = -EINTR;
+                                       break;
+                               }
+                       /* fallthrough */
+                       case -ERESTARTNOINTR:
+                               regs->eax = regs->orig_eax;
+                               regs->eip -= 2;
+               }
+       }
+
+       /* Set up the stack frame */
+       if (ka->sa.sa_flags & SA_SIGINFO)
+               setup_rt_frame(sig, ka, info, oldset, regs);
+       else
+               setup_frame(sig, ka, oldset, regs);
+
+       if (ka->sa.sa_flags & SA_ONESHOT)
+               ka->sa.sa_handler = SIG_DFL;
+
+       if (!(ka->sa.sa_flags & SA_NODEFER)) {
+               spin_lock_irq(&current->sighand->siglock);
+               sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+               sigaddset(&current->blocked,sig);
+               recalc_sigpending();
+               spin_unlock_irq(&current->sighand->siglock);
+       }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init, not even with SIGKILL,
+ * even by mistake.
+ */
+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+       siginfo_t info;
+       int signr;
+
+       /*
+        * We want the common case to go fast, which
+        * is why we may in certain cases get here from
+        * kernel mode. Just return without doing anything
+        * if so.
+        */
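+       /*
+        * Under Xen the kernel runs in ring 1 rather than ring 0, so
+        * user mode is recognized by bit 1 of the CS RPL (set in rings
+        * 2 and 3) instead of the native RPL == 3 test.
+        */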
+       if ((regs->xcs & 2) != 2)
+               return 1;
+
+       if (current->flags & PF_FREEZE) {
+               refrigerator(0);
+               goto no_signal;
+       }
+
+       if (!oldset)
+               oldset = &current->blocked;
+
+       signr = get_signal_to_deliver(&info, regs, NULL);
+       if (signr > 0) {
+               /* Reenable any watchpoints before delivering the
+                * signal to user space. The processor register will
+                * have been cleared if the watchpoint triggered
+                * inside the kernel.
+                */
+               if (current->thread.debugreg[7])
+                       HYPERVISOR_set_debugreg(7,
+                                               current->thread.debugreg[7]);
+
+               /* Whee!  Actually deliver the signal.  */
+               handle_signal(signr, &info, oldset, regs);
+               return 1;
+       }
+
+ no_signal:
+       /* Did we come from a system call? */
+       if (regs->orig_eax >= 0) {
+               /* Restart the system call - no handlers present */
+               if (regs->eax == -ERESTARTNOHAND ||
+                   regs->eax == -ERESTARTSYS ||
+                   regs->eax == -ERESTARTNOINTR) {
+                       regs->eax = regs->orig_eax;
+                       regs->eip -= 2;
+               }
+               if (regs->eax == -ERESTART_RESTARTBLOCK) {
+                       regs->eax = __NR_restart_syscall;
+                       regs->eip -= 2;
+               }
+       }
+       return 0;
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+__attribute__((regparm(3)))
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+                     __u32 thread_info_flags)
+{
+       /* Pending single-step? */
+       if (thread_info_flags & _TIF_SINGLESTEP) {
+               regs->eflags |= TF_MASK;
+               clear_thread_flag(TIF_SINGLESTEP);
+       }
+       /* deal with pending signal delivery */
+       if (thread_info_flags & _TIF_SIGPENDING)
+               do_signal(regs,oldset);
+       
+       clear_thread_flag(TIF_IRET);
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/sysenter.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/sysenter.c
new file mode 100644 (file)
index 0000000..b1e4990
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * linux/arch/i386/kernel/sysenter.c
+ *
+ * (C) Copyright 2002 Linus Torvalds
+ *
+ * This file contains the needed initializations to support sysenter.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/elf.h>
+
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+#include <asm/pgtable.h>
+#include <asm/unistd.h>
+
+extern asmlinkage void sysenter_entry(void);
+
+void enable_sep_cpu(void *info)
+{
+       int cpu = get_cpu();
+       struct tss_struct *tss = init_tss + cpu;
+
+       tss->ss1 = __KERNEL_CS;
+       tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
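+
+       /*
+        * The SYSENTER MSRs hold the flat ring-0 state loaded by the
+        * sysenter instruction: code segment, stack pointer and entry
+        * point.
+        */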
+       wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+       wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+       wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+       put_cpu();      
+}
+
+/*
+ * These symbols are defined by vsyscall.o to mark the bounds
+ * of the ELF DSO images included therein.
+ */
+extern const char vsyscall_int80_start, vsyscall_int80_end;
+extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+
+static int __init sysenter_setup(void)
+{
+       unsigned long page = get_zeroed_page(GFP_ATOMIC);
+
+       __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY);
+
+       if (1 /* XXXcl not yet */ || !boot_cpu_has(X86_FEATURE_SEP)) {
+               memcpy((void *) page,
+                      &vsyscall_int80_start,
+                      &vsyscall_int80_end - &vsyscall_int80_start);
+               return 0;
+       }
+
+       memcpy((void *) page,
+              &vsyscall_sysenter_start,
+              &vsyscall_sysenter_end - &vsyscall_sysenter_start);
+
+       on_each_cpu(enable_sep_cpu, NULL, 1, 1);
+       return 0;
+}
+
+__initcall(sysenter_setup);
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/time.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/time.c
new file mode 100644 (file)
index 0000000..66b1aff
--- /dev/null
@@ -0,0 +1,412 @@
+/*
+ *  linux/arch/i386/kernel/time.c
+ *
+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02    Alan Modra
+ *     fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26    Markus Kuhn
+ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ *      precision CMOS clock update
+ * 1996-05-03    Ingo Molnar
+ *      fixed time warps in do_[slow|fast]_gettimeoffset()
+ * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
+ *             "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1998-09-05    (Various)
+ *     More robust do_fast_gettimeoffset() algorithm implemented
+ *     (works with APM, Cyrix 6x86MX and Centaur C6),
+ *     monotonic gettimeofday() with fast_get_timeoffset(),
+ *     drift-proof precision TSC calibration on boot
+ *     (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
+ *     Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
+ *     ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
+ * 1998-12-16    Andrea Arcangeli
+ *     Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
+ *     because it was not accounting for lost_ticks.
+ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
+ *     Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ *     serialize accesses to xtime/lost_ticks).
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/bcd.h>
+#include <linux/efi.h>
+
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/msr.h>
+#include <asm/delay.h>
+#include <asm/mpspec.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/timer.h>
+
+#include "mach_time.h"
+
+#include <linux/timex.h>
+#include <linux/config.h>
+
+#include <asm/hpet.h>
+
+#include <asm/arch_hooks.h>
+
+#include "io_ports.h"
+
+extern spinlock_t i8259A_lock;
+int pit_latch_buggy;              /* extern */
+
+#include "do_timer.h"
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+unsigned long cpu_khz; /* Detected as we calibrate the TSC */
+
+extern unsigned long wall_jiffies;
+
+spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+
+spinlock_t i8253_lock = SPIN_LOCK_UNLOCKED;
+EXPORT_SYMBOL(i8253_lock);
+
+struct timer_opts *cur_timer = &timer_none;
+
+/*
+ * This version of gettimeofday has microsecond resolution
+ * and better than microsecond precision on fast x86 machines with TSC.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+       unsigned long seq;
+       unsigned long usec, sec;
+       unsigned long max_ntp_tick;
+
+       do {
+               unsigned long lost;
+
+               seq = read_seqbegin(&xtime_lock);
+
+               usec = cur_timer->get_offset();
+               lost = jiffies - wall_jiffies;
+
+               /*
+                * If time_adjust is negative then NTP is slowing the clock
+		 * so make sure not to go into the next possible interval.
+		 * Better to lose some accuracy than have time go backwards.
+                */
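+		/*
+		 * Worked example (illustrative): with HZ=100 a tick is
+		 * 10000 usec, so each lost jiffy below normally adds
+		 * 10000 usec; while NTP slews the clock backwards, each
+		 * tick's contribution is clamped to (10000 - tickadj)
+		 * usec instead.
+		 */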
+               if (unlikely(time_adjust < 0)) {
+                       max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
+                       usec = min(usec, max_ntp_tick);
+
+                       if (lost)
+                               usec += lost * max_ntp_tick;
+               }
+               else if (unlikely(lost))
+                       usec += lost * (USEC_PER_SEC / HZ);
+
+               sec = xtime.tv_sec;
+               usec += (xtime.tv_nsec / 1000);
+       } while (read_seqretry(&xtime_lock, seq));
+
+       while (usec >= 1000000) {
+               usec -= 1000000;
+               sec++;
+       }
+
+       tv->tv_sec = sec;
+       tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+       time_t wtm_sec, sec = tv->tv_sec;
+       long wtm_nsec, nsec = tv->tv_nsec;
+
+       if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+               return -EINVAL;
+
+       write_seqlock_irq(&xtime_lock);
+       /*
+        * This is revolting. We need to set "xtime" correctly. However, the
+        * value in this location is the value at the most recent update of
+        * wall time.  Discover what correction gettimeofday() would have
+        * made, and then undo it!
+        */
+       nsec -= cur_timer->get_offset() * NSEC_PER_USEC;
+       nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
+
+       wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+       wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+       set_normalized_timespec(&xtime, sec, nsec);
+       set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+       time_adjust = 0;                /* stop active adjtime() */
+       time_status |= STA_UNSYNC;
+       time_maxerror = NTP_PHASE_LIMIT;
+       time_esterror = NTP_PHASE_LIMIT;
+       write_sequnlock_irq(&xtime_lock);
+       clock_was_set();
+       return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+#if 0
+static int set_rtc_mmss(unsigned long nowtime)
+{
+       int retval;
+
+       /* gets recalled with irq locally disabled */
+       spin_lock(&rtc_lock);
+       if (efi_enabled)
+               retval = efi_set_rtc_mmss(nowtime);
+       else
+               retval = mach_set_rtc_mmss(nowtime);
+       spin_unlock(&rtc_lock);
+
+       return retval;
+}
+
+/* last time the cmos clock got updated */
+static long last_rtc_update;
+#endif
+
+int timer_ack;
+
+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
+ *             Note: This function is required to return accurate
+ *             time even in the absence of multiple timer ticks.
+ */
+unsigned long long monotonic_clock(void)
+{
+       return cur_timer->monotonic_clock();
+}
+EXPORT_SYMBOL(monotonic_clock);
+
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static inline void do_timer_interrupt(int irq, void *dev_id,
+                                       struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_IO_APIC
+       if (timer_ack) {
+               /*
+                * Subtle, when I/O APICs are used we have to ack timer IRQ
+                * manually to reset the IRR bit for do_slow_gettimeoffset().
+                * This will also deassert NMI lines for the watchdog if run
+                * on an 82489DX-based system.
+                */
+               spin_lock(&i8259A_lock);
+               outb(0x0c, PIC_MASTER_OCW3);
+               /* Ack the IRQ; AEOI will end it automatically. */
+               inb(PIC_MASTER_POLL);
+               spin_unlock(&i8259A_lock);
+       }
+#endif
+
+       do_timer_interrupt_hook(regs);
+
+#if 0                          /* XEN PRIV */
+       /*
+        * If we have an externally synchronized Linux clock, then update
+        * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+        * called as close as possible to 500 ms before the new second starts.
+        */
+       if ((time_status & STA_UNSYNC) == 0 &&
+           xtime.tv_sec > last_rtc_update + 660 &&
+           (xtime.tv_nsec / 1000)
+                       >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
+           (xtime.tv_nsec / 1000)
+                       <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
+               /* horrible...FIXME */
+               if (efi_enabled) {
+                       if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
+                               last_rtc_update = xtime.tv_sec;
+                       else
+                               last_rtc_update = xtime.tv_sec - 600;
+               } else if (set_rtc_mmss(xtime.tv_sec) == 0)
+                       last_rtc_update = xtime.tv_sec;
+               else
+                       last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+       }
+#endif
+
+#ifdef CONFIG_MCA
+       if( MCA_bus ) {
+               /* The PS/2 uses level-triggered interrupts.  You can't
+               turn them off, nor would you want to (any attempt to
+               enable edge-triggered interrupts usually gets intercepted by a
+               special hardware circuit).  Hence we have to acknowledge
+               the timer interrupt.  Through some incredibly stupid
+               design idea, the reset for IRQ 0 is done by setting the
+               high bit of the PPI port B (0x61).  Note that some PS/2s,
+               notably the 55SX, work fine if this is removed.  */
+
+               irq = inb_p( 0x61 );    /* read the current state */
+               outb_p( irq|0x80, 0x61 );       /* reset the IRQ */
+       }
+#endif
+}
+
+/*
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
+ */
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+       /*
+        * Here we are in the timer irq handler. We just have irqs locally
+        * disabled but we don't know if the timer_bh is running on the other
+	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+        * the irq version of write_lock because as just said we have irq
+        * locally disabled. -arca
+        */
+       write_seqlock(&xtime_lock);
+
+       cur_timer->mark_offset();
+       if (regs)               /* XXXcl check regs in do_timer_interrupt */
+               do_timer_interrupt(irq, NULL, regs);
+
+       write_sequnlock(&xtime_lock);
+       return IRQ_HANDLED;
+}
+
+/* not static: needed by APM */
+unsigned long get_cmos_time(void)
+{
+       unsigned long retval;
+
+       spin_lock(&rtc_lock);
+
+       if (efi_enabled)
+               retval = efi_get_time();
+       else
+               retval = mach_get_cmos_time();
+
+       spin_unlock(&rtc_lock);
+
+       return retval;
+}
+
+static long clock_cmos_diff;
+
+static int time_suspend(struct sys_device *dev, u32 state)
+{
+       /*
+        * Estimate time zone so that set_time can update the clock
+        */
+       clock_cmos_diff = -get_cmos_time();
+       clock_cmos_diff += get_seconds();
+       return 0;
+}
+
+static int time_resume(struct sys_device *dev)
+{
+       unsigned long sec = get_cmos_time() + clock_cmos_diff;
+       write_seqlock_irq(&xtime_lock);
+       xtime.tv_sec = sec;
+       xtime.tv_nsec = 0;
+       write_sequnlock_irq(&xtime_lock);
+       return 0;
+}
+
+static struct sysdev_class pit_sysclass = {
+       .resume = time_resume,
+       .suspend = time_suspend,
+       set_kset_name("pit"),
+};
+
+
+/* XXX this driverfs stuff should probably go elsewhere later -john */
+static struct sys_device device_i8253 = {
+       .id     = 0,
+       .cls    = &pit_sysclass,
+};
+
+static int time_init_device(void)
+{
+       int error = sysdev_class_register(&pit_sysclass);
+       if (!error)
+               error = sysdev_register(&device_i8253);
+       return error;
+}
+
+device_initcall(time_init_device);
+
+#ifdef CONFIG_HPET_TIMER
+extern void (*late_time_init)(void);
+/* Duplicate of time_init() below, with hpet_enable part added */
+void __init hpet_time_init(void)
+{
+       xtime.tv_sec = get_cmos_time();
+       wall_to_monotonic.tv_sec = -xtime.tv_sec;
+       xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+       wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
+
+       if (hpet_enable() >= 0) {
+               printk("Using HPET for base-timer\n");
+       }
+
+       cur_timer = select_timer();
+	printk(KERN_INFO "Using %s for high-res timesource\n", cur_timer->name);
+
+       time_init_hook();
+}
+#endif
+
+/* Dynamically-mapped IRQ. */
+static int time_irq;
+
+static struct irqaction irq_timer = {
+       timer_interrupt, SA_INTERRUPT, 0, "timer",
+       NULL, NULL
+};
+
+void __init time_init(void)
+{
+#ifdef CONFIG_HPET_TIMER
+       if (is_hpet_capable()) {
+               /*
+                * HPET initialization needs to do memory-mapped io. So, let
+                * us do a late initialization after mem_init().
+                */
+               late_time_init = hpet_time_init;
+               return;
+       }
+#endif
+       xtime.tv_sec = HYPERVISOR_shared_info->wc_sec;
+       wall_to_monotonic.tv_sec = -xtime.tv_sec;
+       xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+       wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
+
+       cur_timer = select_timer();
+	printk(KERN_INFO "Using %s for high-res timesource\n", cur_timer->name);
+
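+	/*
+	 * Under Xen the periodic timer arrives as a virtual IRQ
+	 * (VIRQ_TIMER) rather than from the PIT; bind_virq_to_irq()
+	 * maps it onto a dynamically allocated Linux irq number.
+	 */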
+       time_irq  = bind_virq_to_irq(VIRQ_TIMER);
+
+       (void)setup_irq(time_irq, &irq_timer);
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/timers/Makefile b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/timers/Makefile
new file mode 100644 (file)
index 0000000..6f7931e
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# Makefile for x86 timers
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+obj-y :=       timer_tsc.o
+c-obj-y :=     timer.o common.o timer_none.o timer_pit.o
+
+c-link :=
+
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+       @ln -fsn $(srctree)/arch/i386/kernel/timers/$(notdir $@) $@
+
+obj-y  += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
new file mode 100644 (file)
index 0000000..fb87853
--- /dev/null
@@ -0,0 +1,528 @@
+/*
+ * This code largely moved from arch/i386/kernel/time.c.
+ * See comments there for proper credits.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/timex.h>
+#include <linux/errno.h>
+#include <linux/cpufreq.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+
+#include <asm/timer.h>
+#include <asm/io.h>
+/* processor.h for the tsc_disable flag */
+#include <asm/processor.h>
+
+#include "io_ports.h"
+#include "mach_timer.h"
+
+#include <asm/hpet.h>
+
+#ifdef CONFIG_HPET_TIMER
+static unsigned long hpet_usec_quotient;
+static unsigned long hpet_last;
+struct timer_opts timer_tsc;
+#endif
+
+static inline void cpufreq_delayed_get(void);
+
+int tsc_disable __initdata = 0;
+
+extern spinlock_t i8253_lock;
+
+static int use_tsc;
+/* Number of usecs that the last interrupt was delayed */
+static int delay_at_last_interrupt;
+
+static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
+static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
+static unsigned long long monotonic_base;
+static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
+
+/* convert from cycles(64bits) => nanoseconds (64bits)
+ *  basic equation:
+ *             ns = cycles / (freq / ns_per_sec)
+ *             ns = cycles * (ns_per_sec / freq)
+ *             ns = cycles * (10^9 / (cpu_mhz * 10^6))
+ *             ns = cycles * (10^3 / cpu_mhz)
+ *
+ *     Then we use scaling math (suggested by george@mvista.com) to get:
+ *             ns = cycles * (10^3 * SC / cpu_mhz) / SC
+ *             ns = cycles * cyc2ns_scale / SC
+ *
+ *     And since SC is a constant power of two, we can convert the div
+ *  into a shift.   
+ *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+static unsigned long cyc2ns_scale; 
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
+{
+       cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+       return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
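+
+/*
+ * Worked example (illustrative): for a 2000MHz CPU,
+ * cyc2ns_scale = (1000 << 10) / 2000 = 512, and
+ * cycles_2_ns(1000) = (1000 * 512) >> 10 = 500ns -- i.e. 0.5ns
+ * per cycle, as expected at 2GHz.
+ */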
+
+
+#if 0
+static int count2; /* counter for mark_offset_tsc() */
+#endif
+
+/* Cached *multiplier* to convert TSC counts to microseconds.
+ * (see the equation below).
+ * Equal to 2^32 * (1 / (clocks per usec) ).
+ * Initialized in time_init.
+ */
+static unsigned long fast_gettimeoffset_quotient;
+
+
+/* These are periodically updated in shared_info, and then copied here. */
+static u32 shadow_tsc_stamp;
+static u64 shadow_system_time;
+static u32 shadow_time_version;
+static struct timeval shadow_tv;
+
+/*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area. Must be called with the xtime_lock held for writing.
+ */
+static void __get_time_values_from_xen(void)
+{
+       do {
+               shadow_time_version = HYPERVISOR_shared_info->time_version2;
+               rmb();
+               shadow_tv.tv_sec    = HYPERVISOR_shared_info->wc_sec;
+               shadow_tv.tv_usec   = HYPERVISOR_shared_info->wc_usec;
+               shadow_tsc_stamp    = HYPERVISOR_shared_info->tsc_timestamp.tsc_bits;
+               shadow_system_time  = HYPERVISOR_shared_info->system_time;
+               rmb();
+       }
+       while (shadow_time_version != HYPERVISOR_shared_info->time_version1);
+}
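+
+/*
+ * The version pair implements the read side of a seqlock-style
+ * protocol: time_version2 is sampled before the fields and compared
+ * with time_version1 afterwards, so if Xen updated the record while
+ * we were copying it the versions differ and we retry.
+ */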
+
+#define TIME_VALUES_UP_TO_DATE \
+       (shadow_time_version == HYPERVISOR_shared_info->time_version2)
+
+
+static unsigned long get_offset_tsc(void)
+{
+       register unsigned long eax, edx;
+
+       /* Read the Time Stamp Counter */
+
+       rdtsc(eax,edx);
+
+       /* .. relative to previous jiffy (32 bits is enough) */
+       eax -= last_tsc_low;    /* tsc_low delta */
+
+       /*
+         * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
+         *             = (tsc_low delta) * (usecs_per_clock)
+         *             = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
+        *
+        * Using a mull instead of a divl saves up to 31 clock cycles
+        * in the critical path.
+         */
+
+       __asm__("mull %2"
+               :"=a" (eax), "=d" (edx)
+               :"rm" (fast_gettimeoffset_quotient),
+                "0" (eax));
+
+       /* our adjusted time offset in microseconds */
+       return delay_at_last_interrupt + edx;
+}
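+
+/*
+ * Worked example (illustrative): on a 1GHz CPU there are 1000 clocks
+ * per usec, so fast_gettimeoffset_quotient = 2^32/1000 ~= 4294967;
+ * a TSC delta of 500000 cycles then yields
+ * edx = (500000 * 4294967) >> 32 ~= 500 usec.
+ */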
+
+static unsigned long long monotonic_clock_tsc(void)
+{
+       unsigned long long last_offset, this_offset, base;
+       unsigned seq;
+       
+       /* atomically read monotonic base & last_offset */
+       do {
+               seq = read_seqbegin(&monotonic_lock);
+               last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+               base = monotonic_base;
+       } while (read_seqretry(&monotonic_lock, seq));
+
+       /* Read the Time Stamp Counter */
+       rdtscll(this_offset);
+
+       /* return the value in ns */
+       return base + cycles_2_ns(this_offset - last_offset);
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+       unsigned long long this_offset;
+
+       /*
+	 * In the NUMA case we don't use the TSC as they are not
+        * synchronized across all CPUs.
+        */
+#ifndef CONFIG_NUMA
+       if (!use_tsc)
+#endif
+               /* no locking but a rare wrong value is not a big deal */
+               return jiffies_64 * (1000000000 / HZ);
+
+       /* Read the Time Stamp Counter */
+       rdtscll(this_offset);
+
+       /* return the value in ns */
+       return cycles_2_ns(this_offset);
+}
+
+
+static void mark_offset_tsc(void)
+{
+       unsigned long lost,delay;
+       unsigned long delta = last_tsc_low;
+#if 0
+       int count;
+       int countmp;
+       static int count1 = 0;
+#endif
+       unsigned long long this_offset, last_offset;
+       static int lost_count = 0;
+       
+       write_seqlock(&monotonic_lock);
+       last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+       /*
+        * It is important that these two operations happen almost at
+        * the same time. We do the RDTSC stuff first, since it's
+        * faster. To avoid any inconsistencies, we need interrupts
+        * disabled locally.
+        */
+
+       /*
+        * Interrupts are just disabled locally since the timer irq
+        * has the SA_INTERRUPT flag set. -arca
+        */
+       
+       /* read Pentium cycle counter */
+
+       rdtsc(last_tsc_low, last_tsc_high);
+
+#if 0
+       spin_lock(&i8253_lock);
+       outb_p(0x00, PIT_MODE);     /* latch the count ASAP */
+
+       count = inb_p(PIT_CH0);    /* read the latched count */
+       count |= inb(PIT_CH0) << 8;
+
+       /*
+        * VIA686a test code... reset the latch if count > max + 1
+        * from timer_pit.c - cjb
+        */
+       if (count > LATCH) {
+               outb_p(0x34, PIT_MODE);
+               outb_p(LATCH & 0xff, PIT_CH0);
+               outb(LATCH >> 8, PIT_CH0);
+               count = LATCH - 1;
+       }
+
+       spin_unlock(&i8253_lock);
+
+       if (pit_latch_buggy) {
+		/* keep the median of the last 3 timer latch readings */
+               if ((count2 >= count && count >= count1)
+                   || (count1 >= count && count >= count2)) {
+                       count2 = count1; count1 = count;
+               } else if ((count1 >= count2 && count2 >= count)
+                          || (count >= count2 && count2 >= count1)) {
+                       countmp = count;count = count2;
+                       count2 = count1;count1 = countmp;
+               } else {
+                       count2 = count1; count1 = count; count = count1;
+               }
+       }
+#endif
+
+       /* lost tick compensation */
+       delta = last_tsc_low - delta;
+       {
+               register unsigned long eax, edx;
+               eax = delta;
+               __asm__("mull %2"
+               :"=a" (eax), "=d" (edx)
+               :"rm" (fast_gettimeoffset_quotient),
+                "0" (eax));
+               delta = edx;
+       }
+       delta += delay_at_last_interrupt;
+       lost = delta/(1000000/HZ);
+       delay = delta%(1000000/HZ);
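+	/*
+	 * Worked example (illustrative): with HZ=100 a tick is 10000
+	 * usec, so delta = 25000 usec gives lost = 2, delay = 5000;
+	 * the current interrupt accounts for one tick itself, hence
+	 * the lost-1 below.
+	 */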
+       if (lost >= 2) {
+               jiffies_64 += lost-1;
+
+               /* sanity check to ensure we're not always losing ticks */
+               if (lost_count++ > 100) {
+                       printk(KERN_WARNING "Losing too many ticks!\n");
+			printk(KERN_WARNING "TSC cannot be used as a timesource.\n");
+                       printk(KERN_WARNING "Possible reasons for this are:\n");
+                       printk(KERN_WARNING "  You're running with Speedstep,\n");
+                       printk(KERN_WARNING "  You don't have DMA enabled for your hard disk (see hdparm),\n");
+                       printk(KERN_WARNING "  Incorrect TSC synchronization on an SMP system (see dmesg).\n");
+                       printk(KERN_WARNING "Falling back to a sane timesource now.\n");
+
+                       clock_fallback();
+               }
+               /* ... but give the TSC a fair chance */
+               if (lost_count > 25)
+                       cpufreq_delayed_get();
+       } else
+               lost_count = 0;
+       /* update the monotonic base value */
+       this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+       monotonic_base += cycles_2_ns(this_offset - last_offset);
+       write_sequnlock(&monotonic_lock);
+
+#if 0
+       /* calculate delay_at_last_interrupt */
+       count = ((LATCH-1) - count) * TICK_SIZE;
+       delay_at_last_interrupt = (count + LATCH/2) / LATCH;
+#else
+       delay_at_last_interrupt = 0;
+#endif
+
+	/* catch corner case where tick rollover occurred
+        * between tsc and pit reads (as noted when 
+        * usec delta is > 90% # of usecs/tick)
+        */
+       if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
+               jiffies_64++;
+}
+
+static void delay_tsc(unsigned long loops)
+{
+       unsigned long bclock, now;
+       
+       rdtscl(bclock);
+       do
+       {
+               rep_nop();
+               rdtscl(now);
+       } while ((now-bclock) < loops);
+}
+
+#ifdef CONFIG_HPET_TIMER
+static void mark_offset_tsc_hpet(void)
+{
+       unsigned long long this_offset, last_offset;
+       unsigned long offset, temp, hpet_current;
+
+       write_seqlock(&monotonic_lock);
+       last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+       /*
+        * It is important that these two operations happen almost at
+        * the same time. We do the RDTSC stuff first, since it's
+        * faster. To avoid any inconsistencies, we need interrupts
+        * disabled locally.
+        */
+       /*
+        * Interrupts are just disabled locally since the timer irq
+        * has the SA_INTERRUPT flag set. -arca
+        */
+       /* read Pentium cycle counter */
+
+       hpet_current = hpet_readl(HPET_COUNTER);
+       rdtsc(last_tsc_low, last_tsc_high);
+
+       /* lost tick compensation */
+       offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
+       if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) {
+               int lost_ticks = (offset - hpet_last) / hpet_tick;
+               jiffies_64 += lost_ticks;
+       }
+       hpet_last = hpet_current;
+
+       /* update the monotonic base value */
+       this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
+       monotonic_base += cycles_2_ns(this_offset - last_offset);
+       write_sequnlock(&monotonic_lock);
+
+       /* calculate delay_at_last_interrupt */
+       /*
+        * Time offset = (hpet delta) * ( usecs per HPET clock )
+        *             = (hpet delta) * ( usecs per tick / HPET clocks per tick)
+        *             = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
+        * Where,
+        * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
+        */
+       delay_at_last_interrupt = hpet_current - offset;
+       ASM_MUL64_REG(temp, delay_at_last_interrupt,
+                       hpet_usec_quotient, delay_at_last_interrupt);
+}
+#endif
+
+
+#ifdef CONFIG_CPU_FREQ
+#include <linux/workqueue.h>
+
+static unsigned int cpufreq_delayed_issched = 0;
+static unsigned int cpufreq_init = 0;
+static struct work_struct cpufreq_delayed_get_work;
+
+static void handle_cpufreq_delayed_get(void *v)
+{
+       unsigned int cpu;
+       for_each_online_cpu(cpu) {
+               cpufreq_get(cpu);
+       }
+       cpufreq_delayed_issched = 0;
+}
+
+/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
+ * to verify the CPU frequency the timing core thinks the CPU is running
+ * at is still correct.
+ */
+static inline void cpufreq_delayed_get(void) 
+{
+       if (cpufreq_init && !cpufreq_delayed_issched) {
+               cpufreq_delayed_issched = 1;
+               printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
+               schedule_work(&cpufreq_delayed_get_work);
+       }
+}
+
+/* If the CPU frequency is scaled, TSC-based delays will need a different
+ * loops_per_jiffy value to function properly.
+ */
+
+static unsigned int  ref_freq = 0;
+static unsigned long loops_per_jiffy_ref = 0;
+
+#ifndef CONFIG_SMP
+static unsigned long fast_gettimeoffset_ref = 0;
+static unsigned long cpu_khz_ref = 0;
+#endif
+
+static int
+time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+                      void *data)
+{
+       struct cpufreq_freqs *freq = data;
+
+       write_seqlock_irq(&xtime_lock);
+       if (!ref_freq) {
+               ref_freq = freq->old;
+               loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+#ifndef CONFIG_SMP
+               fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
+               cpu_khz_ref = cpu_khz;
+#endif
+       }
+
+       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+           (val == CPUFREQ_RESUMECHANGE)) {
+               if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+                       cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+#ifndef CONFIG_SMP
+               if (cpu_khz)
+                       cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
+               if (use_tsc) {
+                       if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
+                               fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
+                               set_cyc2ns_scale(cpu_khz/1000);
+                       }
+               }
+#endif
+       }
+       write_sequnlock_irq(&xtime_lock);
+
+       return 0;
+}
+
+static struct notifier_block time_cpufreq_notifier_block = {
+       .notifier_call  = time_cpufreq_notifier
+};
+
+
+static int __init cpufreq_tsc(void)
+{
+       int ret;
+       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+       ret = cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+       if (!ret)
+               cpufreq_init = 1;
+       return ret;
+}
+core_initcall(cpufreq_tsc);
+
+#else /* CONFIG_CPU_FREQ */
+static inline void cpufreq_delayed_get(void) { return; }
+#endif 
+
+
+static int __init init_tsc(char* override)
+{
+       unsigned long long alarm;
+       u64 __cpu_khz;
+
+       __cpu_khz = HYPERVISOR_shared_info->cpu_freq;
+       do_div(__cpu_khz, 1000);
+       cpu_khz = (u32)__cpu_khz;
+       printk(KERN_INFO "Xen reported: %lu.%03lu MHz processor.\n", 
+              cpu_khz / 1000, cpu_khz % 1000);
+
+       /* (10^6 * 2^32) / cpu_khz = (2^32 * 1 / (clocks/us)) */
+       {       
+               unsigned long eax=0, edx=1000;
+               __asm__("divl %2"
+                   :"=a" (fast_gettimeoffset_quotient), "=d" (edx)
+                   :"r" (cpu_khz),
+                   "0" (eax), "1" (edx));
+       }
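+	/*
+	 * The divl above computes edx:eax / cpu_khz with edx:eax =
+	 * 1000 * 2^32, i.e. the quotient is 2^32 * 1000 / cpu_khz =
+	 * 2^32 / (clocks per usec).  Illustrative example: cpu_khz =
+	 * 2000000 (a 2GHz CPU) gives 2^32 / 2000 ~= 2147483.
+	 */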
+
+       set_cyc2ns_scale(cpu_khz/1000);
+
+       __get_time_values_from_xen();
+
+       rdtscll(alarm);
+
+       return 0;
+}
+
+#ifndef CONFIG_X86_TSC
+/* disable flag for tsc.  Takes effect by clearing the TSC cpu flag
+ * in cpu/common.c */
+static int __init tsc_setup(char *str)
+{
+       tsc_disable = 1;
+       return 1;
+}
+#else
+static int __init tsc_setup(char *str)
+{
+       printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
+                               "cannot disable TSC.\n");
+       return 1;
+}
+#endif
+__setup("notsc", tsc_setup);
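+/* Usage note: booting with "notsc" on the kernel command line invokes
+ * tsc_setup() above (only a warning when CONFIG_X86_TSC is set). */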
+
+
+
+/************************************************************/
+
+/* tsc timer_opts struct */
+struct timer_opts timer_tsc = {
+       .name =         "tsc",
+       .init =         init_tsc,
+       .mark_offset =  mark_offset_tsc, 
+       .get_offset =   get_offset_tsc,
+       .monotonic_clock =      monotonic_clock_tsc,
+       .delay = delay_tsc,
+};
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/traps.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/traps.c
new file mode 100644 (file)
index 0000000..4f08115
--- /dev/null
@@ -0,0 +1,1040 @@
+/*
+ *  linux/arch/i386/traps.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kallsyms.h>
+#include <linux/ptrace.h>
+#include <linux/version.h>
+
+#ifdef CONFIG_EISA
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#endif
+
+#ifdef CONFIG_MCA
+#include <linux/mca.h>
+#endif
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/debugreg.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
+#include <asm/nmi.h>
+
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/arch_hooks.h>
+
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include "mach_traps.h"
+
+asmlinkage int system_call(void);
+asmlinkage void lcall7(void);
+asmlinkage void lcall27(void);
+
+asmlinkage void safe_page_fault(void);
+
+/* Do we ignore FPU interrupts ? */
+char ignore_fpu_irq = 0;
+
+/*
+ * The IDT has to be page-aligned to simplify the Pentium
+ * F0 0F bug workaround.. We have a special link segment
+ * for this.
+ */
+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
+
+asmlinkage void divide_error(void);
+asmlinkage void debug(void);
+asmlinkage void nmi(void);
+asmlinkage void int3(void);
+asmlinkage void overflow(void);
+asmlinkage void bounds(void);
+asmlinkage void invalid_op(void);
+asmlinkage void device_not_available(void);
+asmlinkage void double_fault(void);
+asmlinkage void coprocessor_segment_overrun(void);
+asmlinkage void invalid_TSS(void);
+asmlinkage void segment_not_present(void);
+asmlinkage void stack_segment(void);
+asmlinkage void general_protection(void);
+asmlinkage void page_fault(void);
+asmlinkage void coprocessor_error(void);
+asmlinkage void simd_coprocessor_error(void);
+asmlinkage void alignment_check(void);
+asmlinkage void spurious_interrupt_bug(void);
+asmlinkage void machine_check(void);
+
+static int kstack_depth_to_print = 24;
+
+static int valid_stack_ptr(struct task_struct *task, void *p)
+{
+       if (p <= (void *)task->thread_info)
+               return 0;
+       if (kstack_end(p))
+               return 0;
+       return 1;
+}
+
+#ifdef CONFIG_FRAME_POINTER
+void print_context_stack(struct task_struct *task, unsigned long *stack,
+                        unsigned long ebp)
+{
+       unsigned long addr;
+
+       while (valid_stack_ptr(task, (void *)ebp)) {
+               addr = *(unsigned long *)(ebp + 4);
+               printk(" [<%08lx>] ", addr);
+               print_symbol("%s", addr);
+               printk("\n");
+               ebp = *(unsigned long *)ebp;
+       }
+}
+#else
+void print_context_stack(struct task_struct *task, unsigned long *stack,
+                        unsigned long ebp)
+{
+       unsigned long addr;
+
+       while (!kstack_end(stack)) {
+               addr = *stack++;
+               if (kernel_text_address(addr)) {
+                       printk(" %p: [<%08lx>] ", stack - 4, addr);
+                       print_symbol("%s\n", addr);
+               }
+       }
+}
+#endif
+
+void show_trace(struct task_struct *task, unsigned long * stack)
+{
+       unsigned long ebp;
+
+       if (!task)
+               task = current;
+
+       if (!valid_stack_ptr(task, stack)) {
+               printk("Stack pointer is garbage, not printing trace\n");
+               return;
+       }
+
+       if (task == current) {
+               /* Grab ebp right from our regs */
+               asm ("movl %%ebp, %0" : "=r" (ebp) : );
+       } else {
+               /* ebp is the last reg pushed by switch_to */
+               ebp = *(unsigned long *) task->thread.esp;
+       }
+
+       while (1) {
+               struct thread_info *context;
+               context = (struct thread_info *)
+                       ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+               print_context_stack(task, stack, ebp);
+               stack = (unsigned long*)context->previous_esp;
+               if (!stack)
+                       break;
+               printk(" =======================\n");
+       }
+       printk("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *esp)
+{
+       unsigned long *stack;
+       int i;
+
+       if (esp == NULL) {
+               if (task)
+                       esp = (unsigned long*)task->thread.esp;
+               else
+                       esp = (unsigned long *)&esp;
+       }
+
+       stack = esp;
+       for(i = 0; i < kstack_depth_to_print; i++) {
+               if (kstack_end(stack))
+                       break;
+               if (i && ((i % 8) == 0))
+                       printk("\n       ");
+               printk("%08lx ", *stack++);
+       }
+       printk("\nCall Trace:\n");
+       show_trace(task, esp);
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+       unsigned long stack;
+
+       show_trace(current, &stack);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_registers(struct pt_regs *regs)
+{
+       int i;
+       int in_kernel = 1;
+       unsigned long esp;
+       unsigned short ss;
+
+       esp = (unsigned long) (&regs->esp);
+       ss = __KERNEL_DS;
+       if (regs->xcs & 2) {
+               in_kernel = 0;
+               esp = regs->esp;
+               ss = regs->xss & 0xffff;
+       }
+       print_modules();
+       printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s\nEFLAGS: %08lx"
+                       "   (%s) \n",
+               smp_processor_id(), 0xffff & regs->xcs, regs->eip,
+               print_tainted(), regs->eflags, UTS_RELEASE);
+       print_symbol("EIP is at %s\n", regs->eip);
+       printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
+               regs->eax, regs->ebx, regs->ecx, regs->edx);
+       printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
+               regs->esi, regs->edi, regs->ebp, esp);
+       printk("ds: %04x   es: %04x   ss: %04x\n",
+               regs->xds & 0xffff, regs->xes & 0xffff, ss);
+       printk("Process %s (pid: %d, threadinfo=%p task=%p)",
+               current->comm, current->pid, current_thread_info(), current);
+       /*
+        * When in-kernel, we also print out the stack and code at the
+        * time of the fault..
+        */
+       if (in_kernel) {
+
+               printk("\nStack: ");
+               show_stack(NULL, (unsigned long*)esp);
+
+               printk("Code: ");
+               if(regs->eip < PAGE_OFFSET)
+                       goto bad;
+
+               for(i=0;i<20;i++)
+               {
+                       unsigned char c;
+                       if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
+bad:
+                               printk(" Bad EIP value.");
+                               break;
+                       }
+                       printk("%02x ", c);
+               }
+       }
+       printk("\n");
+}      
+
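+/*
+ * With verbose BUG support, BUG() on i386 compiles to a "ud2" opcode
+ * followed in the instruction stream by a 16-bit line number and a
+ * 32-bit file-name pointer; that is the layout handle_BUG() decodes.
+ */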
+static void handle_BUG(struct pt_regs *regs)
+{
+       unsigned short ud2;
+       unsigned short line;
+       char *file;
+       char c;
+       unsigned long eip;
+
+       if (regs->xcs & 2)
+               goto no_bug;            /* Not in kernel */
+
+       eip = regs->eip;
+
+       if (eip < PAGE_OFFSET)
+               goto no_bug;
+       if (__get_user(ud2, (unsigned short *)eip))
+               goto no_bug;
+       if (ud2 != 0x0b0f)
+               goto no_bug;
+       if (__get_user(line, (unsigned short *)(eip + 2)))
+               goto bug;
+       if (__get_user(file, (char **)(eip + 4)) ||
+               (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+               file = "<bad filename>";
+
+       printk("------------[ cut here ]------------\n");
+       printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
+
+no_bug:
+       return;
+
+       /* Here we know it was a BUG but file-n-line is unavailable */
+bug:
+       printk("Kernel BUG\n");
+}
+
+spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+       static int die_counter;
+       int nl = 0;
+
+       console_verbose();
+       spin_lock_irq(&die_lock);
+       bust_spinlocks(1);
+       handle_BUG(regs);
+       printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+       printk("PREEMPT ");
+       nl = 1;
+#endif
+#ifdef CONFIG_SMP
+       printk("SMP ");
+       nl = 1;
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       printk("DEBUG_PAGEALLOC");
+       nl = 1;
+#endif
+       if (nl)
+               printk("\n");
+       show_registers(regs);
+       bust_spinlocks(0);
+       spin_unlock_irq(&die_lock);
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+
+       if (panic_on_oops) {
+               printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(5 * HZ);
+               panic("Fatal exception");
+       }
+       do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+       if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
+               die(str, regs, err);
+}
+
+static inline unsigned long get_cr2(void)
+{
+       unsigned long address;
+
+       /* get the address */
+       __asm__("movl %%cr2,%0":"=r" (address));
+       return address;
+}
+
+static inline void do_trap(int trapnr, int signr, char *str, int vm86,
+                          struct pt_regs * regs, long error_code, siginfo_t *info)
+{
+       if (regs->eflags & VM_MASK) {
+               if (vm86)
+                       goto vm86_trap;
+               goto trap_signal;
+       }
+
+       if (!(regs->xcs & 2))
+               goto kernel_trap;
+
+       trap_signal: {
+               struct task_struct *tsk = current;
+               tsk->thread.error_code = error_code;
+               tsk->thread.trap_no = trapnr;
+               if (info)
+                       force_sig_info(signr, info, tsk);
+               else
+                       force_sig(signr, tsk);
+               return;
+       }
+
+       kernel_trap: {
+               if (!fixup_exception(regs))
+                       die(str, regs, error_code);
+               return;
+       }
+
+       vm86_trap: {
+               int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
+               if (ret) goto trap_signal;
+               return;
+       }
+}
+
+#define DO_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+       do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
+}
+
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+       siginfo_t info; \
+       info.si_signo = signr; \
+       info.si_errno = 0; \
+       info.si_code = sicode; \
+       info.si_addr = (void *)siaddr; \
+       do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
+}
+
+#define DO_VM86_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+       do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
+}
+
+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+       siginfo_t info; \
+       info.si_signo = signr; \
+       info.si_errno = 0; \
+       info.si_code = sicode; \
+       info.si_addr = (void *)siaddr; \
+       do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
+}
+
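+/*
+ * Example expansion (illustrative): DO_ERROR(10, SIGSEGV, "invalid TSS",
+ * invalid_TSS) defines an asmlinkage do_invalid_TSS(regs, error_code)
+ * handler that simply calls
+ * do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL).
+ */
+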
+DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
+DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
+DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
+DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
+DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+#ifdef CONFIG_X86_MCE
+DO_ERROR(18, SIGBUS, "machine check", machine_check)
+#endif
+
+asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
+{
+       /*
+        * If we trapped on an LDT access then ensure that the default_ldt is
+        * loaded, if nothing else. We load default_ldt lazily because LDT
+        * switching costs time and many applications don't need it.
+        */
+       if (unlikely((error_code & 6) == 4)) {
+               unsigned long ldt;
+               __asm__ __volatile__ ("sldt %0" : "=r" (ldt));
+               if (ldt == 0) {
+                       mmu_update_t u;
+                       u.ptr = MMU_EXTENDED_COMMAND;
+                       u.ptr |= (unsigned long)&default_ldt[0];
+                       u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
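+			/*
+			 * Here u.ptr carries the extended-command tag plus
+			 * the default_ldt address, and u.val packs
+			 * MMUEXT_SET_LDT with an entry count (presumably
+			 * default_ldt's 5 entries) above MMUEXT_CMD_SHIFT.
+			 */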
+                       if (unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0)) {
+                               show_trace(NULL, (unsigned long *)&u);
+                               panic("Failed to install default LDT");
+                       }
+                       return;
+               }
+       }
+
+#if 0
+       if (regs->eflags & X86_EFLAGS_IF)
+               local_irq_enable();
+#endif
+       if (regs->eflags & VM_MASK)
+               goto gp_in_vm86;
+
+       if (!(regs->xcs & 2))
+               goto gp_in_kernel;
+
+       current->thread.error_code = error_code;
+       current->thread.trap_no = 13;
+       force_sig(SIGSEGV, current);
+       return;
+
+gp_in_vm86:
+       local_irq_enable();
+       handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
+       return;
+
+gp_in_kernel:
+       if (!fixup_exception(regs))
+               die("general protection fault", regs, error_code);
+}
+
+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+{
+       printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
+       printk("You probably have a hardware problem with your RAM chips\n");
+
+       /* Clear and disable the memory parity error line. */
+       clear_mem_error(reason);
+}
+
+static void io_check_error(unsigned char reason, struct pt_regs * regs)
+{
+       unsigned long i;
+
+       printk("NMI: IOCK error (debug interrupt?)\n");
+       show_registers(regs);
+
+       /* Re-enable the IOCK line, wait for a few seconds */
+       reason = (reason & 0xf) | 8;
+       outb(reason, 0x61);
+       i = 2000;
+       while (--i) udelay(1000);
+       reason &= ~8;
+       outb(reason, 0x61);
+}
+
+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+{
+#ifdef CONFIG_MCA
+       /* Might actually be able to figure out what the guilty party
+       * is. */
+       if( MCA_bus ) {
+               mca_handle_nmi();
+               return;
+       }
+#endif
+       printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+               reason, smp_processor_id());
+       printk("Dazed and confused, but trying to continue\n");
+       printk("Do you have a strange power saving mode enabled?\n");
+}
+
+static void default_do_nmi(struct pt_regs * regs)
+{
+       unsigned char reason = get_nmi_reason();
+       if (!(reason & 0xc0)) {
+#ifdef CONFIG_X86_LOCAL_APIC
+               /*
+                * Ok, so this is none of the documented NMI sources,
+                * so it must be the NMI watchdog.
+                */
+               if (nmi_watchdog) {
+                       nmi_watchdog_tick(regs);
+                       return;
+               }
+#endif
+               unknown_nmi_error(reason, regs);
+               return;
+       }
+       if (reason & 0x80)
+               mem_parity_error(reason, regs);
+       if (reason & 0x40)
+               io_check_error(reason, regs);
+       /*
+        * Reassert NMI in case it became active meanwhile
+        * as it's edge-triggered.
+        */
+       reassert_nmi();
+}
+
+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+{
+       return 0;
+}
+static nmi_callback_t nmi_callback = dummy_nmi_callback;
+asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+{
+       int cpu;
+
+       nmi_enter();
+
+       cpu = smp_processor_id();
+       ++nmi_count(cpu);
+
+       if (!nmi_callback(regs, cpu))
+               default_do_nmi(regs);
+
+       nmi_exit();
+}
+
+void set_nmi_callback(nmi_callback_t callback)
+{
+       nmi_callback = callback;
+}
+
+void unset_nmi_callback(void)
+{
+       nmi_callback = dummy_nmi_callback;
+}
+
+/*
+ * Our handling of the processor debug registers is non-trivial.
+ * We do not clear them on entry and exit from the kernel. Therefore
+ * it is possible to get a watchpoint trap here from inside the kernel.
+ * However, the code in ./ptrace.c has ensured that the user can
+ * only set watchpoints on userspace addresses. Therefore the in-kernel
+ * watchpoint trap can only occur in code which is reading/writing
+ * from user space. Such code must not hold kernel locks (since it
+ * can equally take a page fault), therefore it is safe to call
+ * force_sig_info even though that claims and releases locks.
+ * 
+ * Code in ./signal.c ensures that the debug control register
+ * is restored before we deliver any signal, and therefore that
+ * user code runs with the correct debug control register even though
+ * we clear it here.
+ *
+ * Being careful here means that we don't have to be as careful in a
+ * lot of more complicated places (task switching can be a bit lazy
+ * about restoring all the debug state, and ptrace doesn't have to
+ * find every occurrence of the TF bit that could be saved away even
+ * by user code)
+ */
+asmlinkage void do_debug(struct pt_regs * regs, long error_code)
+{
+       unsigned int condition;
+       struct task_struct *tsk = current;
+       siginfo_t info;
+
+       condition = HYPERVISOR_get_debugreg(6);
+
+#if 0
+       /* It's safe to allow irq's after DR6 has been saved */
+       if (regs->eflags & X86_EFLAGS_IF)
+               local_irq_enable();
+#endif
+
+       /* Mask out spurious debug traps due to lazy DR7 setting */
+       if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+               if (!tsk->thread.debugreg[7])
+                       goto clear_dr7;
+       }
+
+       if (regs->eflags & VM_MASK)
+               goto debug_vm86;
+
+       /* Save debug status register where ptrace can see it */
+       tsk->thread.debugreg[6] = condition;
+
+       /* Mask out spurious TF errors due to lazy TF clearing */
+       if (condition & DR_STEP) {
+               /*
+                * The TF error should be masked out only if the current
+                * process is not traced and if the TRAP flag has been set
+                * previously by a tracing process (condition detected by
+                * the PT_DTRACE flag); remember that the i386 TRAP flag
+                * can be modified by the process itself in user mode,
+                * allowing programs to debug themselves without the ptrace()
+                * interface.
+                */
+               if ((regs->xcs & 2) == 0)
+                       goto clear_TF_reenable;
+               if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
+                       goto clear_TF;
+       }
+
+       /* Ok, finally something we can handle */
+       tsk->thread.trap_no = 1;
+       tsk->thread.error_code = error_code;
+       info.si_signo = SIGTRAP;
+       info.si_errno = 0;
+       info.si_code = TRAP_BRKPT;
+       
+       /* If this is a kernel mode trap, save the user PC on entry to 
+        * the kernel, that's what the debugger can make sense of.
+        */
+       info.si_addr = ((regs->xcs & 2) == 0) ? (void *)tsk->thread.eip : 
+                                               (void *)regs->eip;
+       force_sig_info(SIGTRAP, &info, tsk);
+
+       /* Disable additional traps. They'll be re-enabled when
+        * the signal is delivered.
+        */
+clear_dr7:
+       HYPERVISOR_set_debugreg(7, 0);
+       return;
+
+debug_vm86:
+       handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
+       return;
+
+clear_TF_reenable:
+       set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+clear_TF:
+       regs->eflags &= ~TF_MASK;
+       return;
+}
+
+/*
+ * Note that we play around with the 'TS' bit in an attempt to get
+ * the correct behaviour even in the presence of the asynchronous
+ * IRQ13 behaviour
+ */
+void math_error(void *eip)
+{
+       struct task_struct * task;
+       siginfo_t info;
+       unsigned short cwd, swd;
+
+       /*
+        * Save the info for the exception handler and clear the error.
+        */
+       task = current;
+       save_init_fpu(task);
+       task->thread.trap_no = 16;
+       task->thread.error_code = 0;
+       info.si_signo = SIGFPE;
+       info.si_errno = 0;
+       info.si_code = __SI_FAULT;
+       info.si_addr = eip;
+       /*
+        * (~cwd & swd) will mask out exceptions that are not set to unmasked
+        * status.  0x3f is the exception bits in these regs, 0x200 is the
+        * C1 reg you need in case of a stack fault, 0x040 is the stack
+        * fault bit.  We should only be taking one exception at a time,
+        * so if this combination doesn't produce any single exception,
+	 * then we have a bad program that isn't synchronizing its FPU usage
+        * and it will suffer the consequences since we won't be able to
+        * fully reproduce the context of the exception
+        */
+       cwd = get_fpu_cwd(task);
+       swd = get_fpu_swd(task);
+       switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
+               case 0x000:
+               default:
+                       break;
+               case 0x001: /* Invalid Op */
+               case 0x040: /* Stack Fault XXX? */
+               case 0x240: /* Stack Fault | Direction XXX? */
+                       info.si_code = FPE_FLTINV;
+                       /* Should we clear the SF or let user space do it ???? */
+                       break;
+               case 0x002: /* Denormalize */
+               case 0x010: /* Underflow */
+                       info.si_code = FPE_FLTUND;
+                       break;
+               case 0x004: /* Zero Divide */
+                       info.si_code = FPE_FLTDIV;
+                       break;
+               case 0x008: /* Overflow */
+                       info.si_code = FPE_FLTOVF;
+                       break;
+               case 0x020: /* Precision */
+                       info.si_code = FPE_FLTRES;
+                       break;
+       }
+       force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
+{
+       ignore_fpu_irq = 1;
+       math_error((void *)regs->eip);
+}
+
+void simd_math_error(void *eip)
+{
+       struct task_struct * task;
+       siginfo_t info;
+       unsigned short mxcsr;
+
+       /*
+        * Save the info for the exception handler and clear the error.
+        */
+       task = current;
+       save_init_fpu(task);
+       task->thread.trap_no = 19;
+       task->thread.error_code = 0;
+       info.si_signo = SIGFPE;
+       info.si_errno = 0;
+       info.si_code = __SI_FAULT;
+       info.si_addr = eip;
+       /*
+        * The SIMD FPU exceptions are handled a little differently, as there
+        * is only a single status/control register.  Thus, to determine which
+        * unmasked exception was caught we must mask the exception mask bits
+        * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+        */
+       mxcsr = get_fpu_mxcsr(task);
+       switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
+               case 0x000:
+               default:
+                       break;
+               case 0x001: /* Invalid Op */
+                       info.si_code = FPE_FLTINV;
+                       break;
+               case 0x002: /* Denormalize */
+               case 0x010: /* Underflow */
+                       info.si_code = FPE_FLTUND;
+                       break;
+               case 0x004: /* Zero Divide */
+                       info.si_code = FPE_FLTDIV;
+                       break;
+               case 0x008: /* Overflow */
+                       info.si_code = FPE_FLTOVF;
+                       break;
+               case 0x020: /* Precision */
+                       info.si_code = FPE_FLTRES;
+                       break;
+       }
+       force_sig_info(SIGFPE, &info, task);
+}
+
+asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs,
+                                         long error_code)
+{
+       if (cpu_has_xmm) {
+               /* Handle SIMD FPU exceptions on PIII+ processors. */
+               ignore_fpu_irq = 1;
+               simd_math_error((void *)regs->eip);
+       } else {
+               /*
+                * Handle strange cache flush from user space exception
+                * in all other cases.  This is undocumented behaviour.
+                */
+               if (regs->eflags & VM_MASK) {
+                       handle_vm86_fault((struct kernel_vm86_regs *)regs,
+                                         error_code);
+                       return;
+               }
+               die_if_kernel("cache flush denied", regs, error_code);
+               current->thread.trap_no = 19;
+               current->thread.error_code = error_code;
+               force_sig(SIGSEGV, current);
+       }
+}
+
+asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
+                                         long error_code)
+{
+#if 0
+       /* No need to warn about this any longer. */
+       printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
+#endif
+}
+
+/*
+ *  'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task
+ *
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ *
+ * Must be called with kernel preemption disabled (in this case,
+ * local interrupts are disabled at the call-site in entry.S).
+ */
+asmlinkage void math_state_restore(struct pt_regs regs)
+{
+       struct thread_info *thread = current_thread_info();
+       struct task_struct *tsk = thread->task;
+
+       /*
+        * A trap in kernel mode can be ignored. It'll be the fast XOR or
+        * copying libraries, which will correctly save/restore state and
+        * reset the TS bit in CR0.
+        */
+       if ((regs.xcs & 2) == 0)
+               return;
+
+       clts();         /* Allow maths ops (or we recurse) */
+       if (!tsk->used_math)
+               init_fpu(tsk);
+       restore_fpu(tsk);
+       thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
+}
+
+#ifndef CONFIG_MATH_EMULATION
+
+asmlinkage void math_emulate(long arg)
+{
+       printk("math-emulation not enabled and no coprocessor found.\n");
+       printk("killing %s.\n",current->comm);
+       force_sig(SIGFPE,current);
+       schedule();
+}
+
+#endif /* CONFIG_MATH_EMULATION */
+
+#ifdef CONFIG_X86_F00F_BUG
+void __init trap_init_f00f_bug(void)
+{
+       __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
+
+       /*
+        * Update the IDT descriptor and reload the IDT so that
+        * it uses the read-only mapped virtual address.
+        */
+       idt_descr.address = fix_to_virt(FIX_F00F_IDT);
+       __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
+}
+#endif
+
+#define _set_gate(gate_addr,type,dpl,addr,seg) \
+do { \
+  int __d0, __d1; \
+  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+       "movw %4,%%dx\n\t" \
+       "movl %%eax,%0\n\t" \
+       "movl %%edx,%1" \
+       :"=m" (*((long *) (gate_addr))), \
+        "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
+       :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+        "3" ((char *) (addr)),"2" ((seg) << 16)); \
+} while (0)
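+
+#if 0
+/*
+ * Illustrative sketch (not compiled) of the descriptor that _set_gate()
+ * assembles; the sketch_* name is made up. An i386 gate is two 32-bit
+ * words: selector and low offset bits in the first, high offset bits
+ * plus present/DPL/type in the second.
+ */
+static void sketch_set_gate(unsigned long *gate, int type, int dpl,
+                           void *addr, int seg)
+{
+       unsigned long off = (unsigned long)addr;
+       gate[0] = ((unsigned long)seg << 16) | (off & 0xffff);
+       gate[1] = (off & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
+}
+#endif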
+
+
+/*
+ * This needs to use 'idt_table' rather than 'idt', and
+ * thus use the _nonmapped_ version of the IDT, as the
+ * Pentium F0 0F bugfix can have resulted in the mapped
+ * IDT being write-protected.
+ */
+void set_intr_gate(unsigned int n, void *addr)
+{
+       _set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
+}
+
+#if 0
+static void __init set_trap_gate(unsigned int n, void *addr)
+{
+       _set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
+}
+
+static void __init set_system_gate(unsigned int n, void *addr)
+{
+       _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
+}
+#endif
+
+static void __init set_call_gate(void *a, void *addr)
+{
+       _set_gate(a,12,3,addr,__KERNEL_CS);
+}
+
+#if 0
+static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
+{
+       _set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
+}
+#endif
+
+
+/* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
+static trap_info_t trap_table[] = {
+       {  0, 0, __KERNEL_CS, (unsigned long)divide_error               },
+       {  1, 0, __KERNEL_CS, (unsigned long)debug                      },
+       {  3, 3, __KERNEL_CS, (unsigned long)int3                       },
+       {  4, 3, __KERNEL_CS, (unsigned long)overflow                   },
+       {  5, 3, __KERNEL_CS, (unsigned long)bounds                     },
+       {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                 },
+       {  7, 0, __KERNEL_CS, (unsigned long)device_not_available       },
+       {  8, 0, __KERNEL_CS, (unsigned long)double_fault               },
+       {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
+       { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                },
+       { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present        },
+       { 12, 0, __KERNEL_CS, (unsigned long)stack_segment              },
+       { 13, 0, __KERNEL_CS, (unsigned long)general_protection         },
+       { 14, 0, __KERNEL_CS, (unsigned long)page_fault                 },
+       { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug     },
+       { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error          },
+       { 17, 0, __KERNEL_CS, (unsigned long)alignment_check            },
+#ifdef CONFIG_X86_MCE
+       { 18, 0, __KERNEL_CS, (unsigned long)machine_check              },
+#endif
+       { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error     },
+       { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call    },
+       {  0, 0,           0, 0                                         }
+};
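+
+/*
+ * Each entry above is (vector, privilege level, code selector, handler
+ * address). The level-3 entries (int3, overflow, bounds and the system
+ * call vector) remain reachable from user-mode software interrupts;
+ * the rest cannot be raised with software interrupts at all.
+ */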
+
+void __init trap_init(void)
+{
+       HYPERVISOR_set_trap_table(trap_table);
+       HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
+
+       /*
+        * default LDT is a single-entry callgate to lcall7 for iBCS
+        * and a callgate to lcall27 for Solaris/x86 binaries
+        */
+       clear_page(&default_ldt[0]);
+       set_call_gate(&default_ldt[0],lcall7);
+       set_call_gate(&default_ldt[4],lcall27);
+       __make_page_readonly(&default_ldt[0]);
+
+       /*
+        * Should be a barrier for any external CPU state.
+        */
+       cpu_init();
+}
+
+
+/*
+ * install_safe_pf_handler / install_normal_pf_handler:
+ * 
+ * These are used within the failsafe_callback handler in entry.S to avoid
+ * taking a full page fault when reloading FS and GS. This is because FS and 
+ * GS could be invalid at pretty much any point while Xen Linux executes (we 
+ * don't set them to safe values on entry to the kernel). At *any* point Xen 
+ * may be entered due to a hardware interrupt --- on exit from Xen an invalid 
+ * FS/GS will cause our failsafe_callback to be executed. This could occur, 
+ * for example, while the mmu_update_queue is in an inconsistent state. This
+ * is disastrous because the normal page-fault handler touches the update
+ * queue!
+ * 
+ * Fortunately, within the failsafe handler it is safe to force DS/ES/FS/GS
+ * to zero if they cannot be reloaded -- at this point executing a normal
+ * page fault would not change this effect. The safe page-fault handler
+ * ensures this end result (blow away the selector value) without the dangers
+ * of the normal page-fault handler.
+ * 
+ * NB. Perhaps this can all go away after we have implemented writeable
+ * page tables. :-)
+ */
+
+asmlinkage void do_safe_page_fault(struct pt_regs *regs, 
+                                   unsigned long error_code,
+                                   unsigned long address)
+{
+       if (!fixup_exception(regs))
+               die("Unhandleable 'safe' page fault!", regs, error_code);
+}
+
+unsigned long install_safe_pf_handler(void)
+{
+       static trap_info_t safe_pf[] = { 
+               { 14, 0, __KERNEL_CS, (unsigned long)safe_page_fault },
+               {  0, 0,           0, 0                              }
+       };
+       unsigned long flags;
+       local_irq_save(flags);
+       HYPERVISOR_set_trap_table(safe_pf);
+       return flags; /* This is returned in %eax */
+}
+
+__attribute__((regparm(3))) /* This function takes its arg in %eax */
+void install_normal_pf_handler(unsigned long flags)
+{
+       static trap_info_t normal_pf[] = { 
+               { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
+               {  0, 0,           0, 0                         }
+       };
+       HYPERVISOR_set_trap_table(normal_pf);
+       local_irq_restore(flags);
+}
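+
+#if 0
+/*
+ * Illustrative sketch (not compiled) of the calling sequence described
+ * in the comment above; the sketch_* name is made up. entry.S performs
+ * the equivalent in assembly around the selector reloads.
+ */
+static void sketch_failsafe_selector_reload(void)
+{
+       unsigned long flags = install_safe_pf_handler();
+       /* Reload DS/ES/FS/GS here. If a reload faults, the safe handler
+          simply blows the selector away rather than touching the
+          mmu_update_queue. */
+       install_normal_pf_handler(flags);
+}
+#endif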
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S
new file mode 100644 (file)
index 0000000..829f1c9
--- /dev/null
@@ -0,0 +1,135 @@
+/* ld script to make i386 Linux kernel
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(startup_32)
+jiffies = jiffies_64;
+SECTIONS
+{
+  . = 0xC0000000 + 0x100000;
+  /* read-only */
+  _text = .;                   /* Text and read-only data */
+  .text : {
+       *(.text)
+       SCHED_TEXT
+       *(.fixup)
+       *(.gnu.warning)
+       } = 0x9090
+
+  _etext = .;                  /* End of text section */
+
+  . = ALIGN(16);               /* Exception table */
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  RODATA
+
+  /* writeable */
+  .data : {                    /* Data */
+       *(.data)
+       CONSTRUCTORS
+       }
+
+  . = ALIGN(4096);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(4096);
+  __nosave_end = .;
+
+  . = ALIGN(4096);
+  .data.page_aligned : { *(.data.idt) }
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata = .;                  /* End of data section */
+
+  . = ALIGN(THREAD_SIZE);      /* init_task */
+  .data.init_task : { *(.data.init_task) }
+
+  /* will be freed after init */
+  . = ALIGN(4096);             /* Init code and data */
+  __init_begin = .;
+  .init.text : { 
+       _sinittext = .;
+       *(.init.text)
+       _einittext = .;
+  }
+  .init.data : { *(.init.data) }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __start___param = .;
+  __param : { *(__param) }
+  __stop___param = .;
+  __initcall_start = .;
+  .initcall.init : {
+       *(.initcall1.init) 
+       *(.initcall2.init) 
+       *(.initcall3.init) 
+       *(.initcall4.init) 
+       *(.initcall5.init) 
+       *(.initcall6.init) 
+       *(.initcall7.init)
+  }
+  __initcall_end = .;
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+  SECURITY_INIT
+  . = ALIGN(4);
+  __alt_instructions = .;
+  .altinstructions : { *(.altinstructions) } 
+  __alt_instructions_end = .; 
+  .altinstr_replacement : { *(.altinstr_replacement) }
+  /* .exit.text is discarded at runtime, not link time, to deal with
+     references from .altinstructions and .eh_frame */
+  .exit.text : { *(.exit.text) }
+  .exit.data : { *(.exit.data) }
+  . = ALIGN(4096);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+  . = ALIGN(32);
+  __per_cpu_start = .;
+  .data.percpu  : { *(.data.percpu) }
+  __per_cpu_end = .;
+  . = ALIGN(4096);
+  __init_end = .;
+  /* freed after init ends here */
+       
+  __bss_start = .;             /* BSS */
+  .bss : {
+       *(.bss.page_aligned)
+       *(.bss)
+  }
+  . = ALIGN(4);
+  __bss_stop = .; 
+
+  _end = . ;
+
+  /* This is where the kernel creates the early boot page tables */
+  . = ALIGN(4096);
+  pg0 = .;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+       *(.exitcall.exit)
+       }
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vsyscall.S b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vsyscall.S
new file mode 100644 (file)
index 0000000..f2bcb7f
--- /dev/null
@@ -0,0 +1,15 @@
+#include <linux/init.h>
+
+__INITDATA
+
+       .globl vsyscall_int80_start, vsyscall_int80_end
+vsyscall_int80_start:
+       .incbin "arch/xen/i386/kernel/vsyscall-int80.so"
+vsyscall_int80_end:
+
+       .globl vsyscall_sysenter_start, vsyscall_sysenter_end
+vsyscall_sysenter_start:
+       .incbin "arch/xen/i386/kernel/vsyscall-sysenter.so"
+vsyscall_sysenter_end:
+
+__FINIT
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vsyscall.lds b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
new file mode 100644 (file)
index 0000000..befbdb9
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Linker script for vsyscall DSO.  The vsyscall page is an ELF shared
+ * object prelinked to its virtual address, and with only one read-only
+ * segment (that fits in one page).  This script controls its layout.
+ */
+
+/* This must match <asm/fixmap.h>.  */
+/* = FIXADDR_TOP - PAGE_SIZE
+   = HYPERVISOR_VIRT_START - 2 * PAGE_SIZE - PAGE_SIZE */
+VSYSCALL_BASE = 0xfbffd000;
+
+SECTIONS
+{
+  . = VSYSCALL_BASE + SIZEOF_HEADERS;
+
+  .hash           : { *(.hash) }               :text
+  .dynsym         : { *(.dynsym) }
+  .dynstr         : { *(.dynstr) }
+  .gnu.version    : { *(.gnu.version) }
+  .gnu.version_d  : { *(.gnu.version_d) }
+  .gnu.version_r  : { *(.gnu.version_r) }
+
+  /* This linker script is used both with -r and with -shared.
+     For the layouts to match, we need to skip more than enough
+     space for the dynamic symbol table et al.  If this amount
+     is insufficient, ld -shared will barf.  Just increase it here.  */
+  . = VSYSCALL_BASE + 0x400;
+
+  .text           : { *(.text) }               :text =0x90909090
+
+  .eh_frame_hdr   : { *(.eh_frame_hdr) }       :text :eh_frame_hdr
+  .eh_frame       : { KEEP (*(.eh_frame)) }    :text
+  .dynamic        : { *(.dynamic) }            :text :dynamic
+  .useless        : {
+       *(.got.plt) *(.got)
+       *(.data .data.* .gnu.linkonce.d.*)
+       *(.dynbss)
+       *(.bss .bss.* .gnu.linkonce.b.*)
+  }                                            :text
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+  text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+  dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+  eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+  LINUX_2.5 {
+    global:
+       __kernel_vsyscall;
+       __kernel_sigreturn;
+       __kernel_rt_sigreturn;
+
+    local: *;
+  };
+}
+
+/* The ELF entry point can be used to set the AT_SYSINFO value.  */
+ENTRY(__kernel_vsyscall);
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile
new file mode 100644 (file)
index 0000000..c57e9fb
--- /dev/null
@@ -0,0 +1,24 @@
+#
+# Makefile for the linux i386-specific parts of the memory manager.
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+CFLAGS += -Iarch/$(XENARCH)/mm
+
+obj-y  := init.o fault.o pgtable.o hypervisor.o
+c-obj-y        := ioremap.o extable.o pageattr.o 
+
+c-obj-$(CONFIG_DISCONTIGMEM)   += discontig.o
+c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+c-obj-$(CONFIG_HIGHMEM) += highmem.o
+c-obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
+
+c-link :=
+
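+# The c-obj-y sources are shared unmodified with the native i386 tree;
+# the rule below symlinks them into the Xen build directory on demand.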
+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
+       @ln -fsn $(srctree)/arch/i386/mm/$(notdir $@) $@
+
+obj-y  += $(c-obj-y)
+
+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/fault.c
new file mode 100644 (file)
index 0000000..d850d27
--- /dev/null
@@ -0,0 +1,517 @@
+/*
+ *  linux/arch/i386/mm/fault.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>             /* For unblank_screen() */
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/hardirq.h>
+#include <asm/desc.h>
+
+extern void die(const char *,struct pt_regs *,long);
+
+pgd_t *cur_pgd;                        /* XXXsmp */
+
+/*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out 
+ */
+void bust_spinlocks(int yes)
+{
+       int loglevel_save = console_loglevel;
+
+       if (yes) {
+               oops_in_progress = 1;
+               return;
+       }
+#ifdef CONFIG_VT
+       unblank_screen();
+#endif
+       oops_in_progress = 0;
+       /*
+        * OK, the message is on the console.  Now we call printk()
+        * without oops_in_progress set so that printk will give klogd
+        * a poke.  Hold onto your hats...
+        */
+       console_loglevel = 15;          /* NMI oopser may have shut the console up */
+       printk(" ");
+       console_loglevel = loglevel_save;
+}
+
+/*
+ * Return EIP plus the CS segment base.  The segment limit is also
+ * adjusted, clamped to the kernel/user address space (whichever is
+ * appropriate), and returned in *eip_limit.
+ *
+ * The segment is checked, because it might have been changed by another
+ * task between the original faulting instruction and here.
+ *
+ * If CS is no longer a valid code segment, or if EIP is beyond the
+ * limit, or if it is a kernel address when CS is not a kernel segment,
+ * then the returned value will be greater than *eip_limit.
+ * 
+ * This is slow, but is very rarely executed.
+ */
+static inline unsigned long get_segment_eip(struct pt_regs *regs,
+                                           unsigned long *eip_limit)
+{
+       unsigned long eip = regs->eip;
+       unsigned seg = regs->xcs & 0xffff;
+       u32 seg_ar, seg_limit, base, *desc;
+
+       /* The standard kernel/user address space limit. */
+       *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
+
+       /* Unlikely, but must come before segment checks. */
+       if (unlikely((regs->eflags & VM_MASK) != 0))
+               return eip + (seg << 4);
+       
+       /* By far the most common cases. */
+       if (likely(seg == __USER_CS || seg == __KERNEL_CS))
+               return eip;
+
+       /* Check the segment exists, is within the current LDT/GDT size,
+          that kernel/user (ring 0..3) has the appropriate privilege,
+          that it's a code segment, and get the limit. */
+       __asm__ ("larl %3,%0; lsll %3,%1"
+                : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
+       if ((~seg_ar & 0x9800) || eip > seg_limit) {
+               *eip_limit = 0;
+               return 1;        /* So that returned eip > *eip_limit. */
+       }
+
+       /* Get the GDT/LDT descriptor base. 
+          When you look for races in this code remember that
+          LDT and other horrors are only used in user space. */
+       if (seg & (1<<2)) {
+               /* Must lock the LDT while reading it. */
+               down(&current->mm->context.sem);
+               desc = current->mm->context.ldt;
+               desc = (void *)desc + (seg & ~7);
+       } else {
+               /* Must disable preemption while reading the GDT. */
+               desc = (u32 *)get_cpu_gdt_table(get_cpu());
+               desc = (void *)desc + (seg & ~7);
+       }
+
+       /* Decode the code segment base from the descriptor */
+       base =   (desc[0] >> 16) |
+               ((desc[1] & 0xff) << 16) |
+                (desc[1] & 0xff000000);
+
+       if (seg & (1<<2)) { 
+               up(&current->mm->context.sem);
+       } else
+               put_cpu();
+
+       /* Adjust EIP and segment limit, and clamp at the kernel limit.
+          It's legitimate for segments to wrap at 0xffffffff. */
+       seg_limit += base;
+       if (seg_limit < *eip_limit && seg_limit >= base)
+               *eip_limit = seg_limit;
+       return eip + base;
+}
+
+/* 
+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+ * Check that here and ignore it.
+ */
+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
+{ 
+       unsigned long limit;
+       unsigned long instr = get_segment_eip (regs, &limit);
+       int scan_more = 1;
+       int prefetch = 0; 
+       int i;
+
+       for (i = 0; scan_more && i < 15; i++) { 
+               unsigned char opcode;
+               unsigned char instr_hi;
+               unsigned char instr_lo;
+
+               if (instr > limit)
+                       break;
+               if (__get_user(opcode, (unsigned char *) instr))
+                       break; 
+
+               instr_hi = opcode & 0xf0; 
+               instr_lo = opcode & 0x0f; 
+               instr++;
+
+               switch (instr_hi) { 
+               case 0x20:
+               case 0x30:
+                       /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
+                       scan_more = ((instr_lo & 7) == 0x6);
+                       break;
+                       
+               case 0x60:
+                       /* 0x64 thru 0x67 are valid prefixes in all modes. */
+                       scan_more = (instr_lo & 0xC) == 0x4;
+                       break;          
+               case 0xF0:
+                       /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
+                       scan_more = !instr_lo || (instr_lo>>1) == 1;
+                       break;                  
+               case 0x00:
+                       /* Prefetch instruction is 0x0F0D or 0x0F18 */
+                       scan_more = 0;
+                       if (instr > limit)
+                               break;
+                       if (__get_user(opcode, (unsigned char *) instr)) 
+                               break;
+                       prefetch = (instr_lo == 0xF) &&
+                               (opcode == 0x0D || opcode == 0x18);
+                       break;                  
+               default:
+                       scan_more = 0;
+                       break;
+               } 
+       }
+       return prefetch;
+}
+
+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr)
+{
+       if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+                    boot_cpu_data.x86 >= 6))
+               return __is_prefetch(regs, addr);
+       return 0;
+} 
+
+asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
+
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ *     bit 0 == 0 means no page found, 1 means protection fault
+ *     bit 1 == 0 means read, 1 means write
+ *     bit 2 == 0 means kernel, 1 means user-mode
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
+                             unsigned long address)
+{
+       struct task_struct *tsk;
+       struct mm_struct *mm;
+       struct vm_area_struct * vma;
+       unsigned long page;
+       int write;
+       siginfo_t info;
+
+       /* Set the "privileged fault" bit to something sane. */
+       error_code &= 3;
+       error_code |= (regs->xcs & 2) << 1;
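+       /* Under Xen the guest kernel runs in ring 1, so bit 1 of the
+          saved CS selector's RPL (set only for ring-3 selectors), not
+          the hardware-supplied U/S bit, identifies a user-mode fault. */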
+
+#if 0
+       /* It's safe to allow irq's after cr2 has been saved */
+       if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+               local_irq_enable();
+#endif
+
+       tsk = current;
+
+       info.si_code = SEGV_MAPERR;
+
+       /*
+        * We fault-in kernel-space virtual memory on-demand. The
+        * 'reference' page table is init_mm.pgd.
+        *
+        * NOTE! We MUST NOT take any locks for this case. We may
+        * be in an interrupt or a critical region, and should
+        * only copy the information from the master page table,
+        * nothing more.
+        *
+        * This verifies that the fault happens in kernel space
+        * (error_code & 4) == 0, and that the fault was not a
+        * protection error (error_code & 1) == 0.
+        */
+       if (unlikely(address >= TASK_SIZE)) { 
+               if (!(error_code & 5))
+                       goto vmalloc_fault;
+               /* 
+                * Don't take the mm semaphore here. If we fixup a prefetch
+                * fault we could otherwise deadlock.
+                */
+               goto bad_area_nosemaphore;
+       } 
+
+       mm = tsk->mm;
+
+       /*
+        * If we're in an interrupt, have no user context or are running in an
+        * atomic region then we must not take the fault..
+        */
+       if (in_atomic() || !mm)
+               goto bad_area_nosemaphore;
+
+       down_read(&mm->mmap_sem);
+
+       vma = find_vma(mm, address);
+       if (!vma)
+               goto bad_area;
+       if (vma->vm_start <= address)
+               goto good_area;
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               goto bad_area;
+       if (error_code & 4) {
+               /*
+                * accessing the stack below %esp is always a bug.
+                * The "+ 32" is there due to some instructions (like
+                * pusha) doing post-decrement on the stack and that
+                * doesn't show up until later..
+                */
+               if (address + 32 < regs->esp)
+                       goto bad_area;
+       }
+       if (expand_stack(vma, address))
+               goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+       info.si_code = SEGV_ACCERR;
+       write = 0;
+       switch (error_code & 3) {
+               default:        /* 3: write, present */
+#ifdef TEST_VERIFY_AREA
+                       if (regs->cs == KERNEL_CS)
+                               printk("WP fault at %08lx\n", regs->eip);
+#endif
+                       /* fall through */
+               case 2:         /* write, not present */
+                       if (!(vma->vm_flags & VM_WRITE))
+                               goto bad_area;
+                       write++;
+                       break;
+               case 1:         /* read, present */
+                       goto bad_area;
+               case 0:         /* read, not present */
+                       if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                               goto bad_area;
+       }
+
+ survive:
+       /*
+        * If for any reason at all we couldn't handle the fault,
+        * make sure we exit gracefully rather than endlessly redo
+        * the fault.
+        */
+       switch (handle_mm_fault(mm, vma, address, write)) {
+               case VM_FAULT_MINOR:
+                       tsk->min_flt++;
+                       break;
+               case VM_FAULT_MAJOR:
+                       tsk->maj_flt++;
+                       break;
+               case VM_FAULT_SIGBUS:
+                       goto do_sigbus;
+               case VM_FAULT_OOM:
+                       goto out_of_memory;
+               default:
+                       BUG();
+       }
+
+       /*
+        * Did it hit the DOS screen memory VA from vm86 mode?
+        */
+       if (regs->eflags & VM_MASK) {
+               unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
+               if (bit < 32)
+                       tsk->thread.screen_bitmap |= 1 << bit;
+       }
+       up_read(&mm->mmap_sem);
+       return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+       up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+       /* User mode accesses just cause a SIGSEGV */
+       if (error_code & 4) {
+               /* 
+                * Valid to do another page fault here because this one came 
+                * from user space.
+                */
+               if (is_prefetch(regs, address))
+                       return;
+
+               tsk->thread.cr2 = address;
+               /* Kernel addresses are always protection faults */
+               tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+               tsk->thread.trap_no = 14;
+               info.si_signo = SIGSEGV;
+               info.si_errno = 0;
+               /* info.si_code has been set above */
+               info.si_addr = (void *)address;
+               force_sig_info(SIGSEGV, &info, tsk);
+               return;
+       }
+
+#ifdef CONFIG_X86_F00F_BUG
+       /*
+        * Pentium F0 0F C7 C8 bug workaround.
+        */
+       if (boot_cpu_data.f00f_bug) {
+               unsigned long nr;
+               
+               nr = (address - idt_descr.address) >> 3;
+
+               if (nr == 6) {
+                       do_invalid_op(regs, 0);
+                       return;
+               }
+       }
+#endif
+
+no_context:
+       /* Are we prepared to handle this kernel fault?  */
+       if (fixup_exception(regs))
+               return;
+
+       /* 
+        * Valid to do another page fault here, because if this fault
+        * had been triggered by is_prefetch, fixup_exception would have
+        * handled it.
+        */
+       if (is_prefetch(regs, address))
+               return;
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+       bust_spinlocks(1);
+
+       if (address < PAGE_SIZE)
+               printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+       else
+               printk(KERN_ALERT "Unable to handle kernel paging request");
+       printk(" at virtual address %08lx\n",address);
+       printk(KERN_ALERT " printing eip:\n");
+       printk("%08lx\n", regs->eip);
+       page = ((unsigned long *) cur_pgd)[address >> 22];
+       printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page, machine_to_phys(page));
+       /*
+        * We must not directly access the pte in the highpte
+        * case, the page table might be allocated in highmem.
+        * And let's rather not kmap-atomic the pte, just in case
+        * it's allocated already.
+        */
+#ifndef CONFIG_HIGHPTE
+       if (page & 1) {
+               page &= PAGE_MASK;
+               address &= 0x003ff000;
+               page = machine_to_phys(page);
+               page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+               printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page, machine_to_phys(page));
+       }
+#endif
+       show_trace(NULL, (unsigned long *)&regs[1]);
+       die("Oops", regs, error_code);
+       bust_spinlocks(0);
+       do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+       up_read(&mm->mmap_sem);
+       if (tsk->pid == 1) {
+               yield();
+               down_read(&mm->mmap_sem);
+               goto survive;
+       }
+       printk("VM: killing process %s\n", tsk->comm);
+       if (error_code & 4)
+               do_exit(SIGKILL);
+       goto no_context;
+
+do_sigbus:
+       up_read(&mm->mmap_sem);
+
+       /* Kernel mode? Handle exceptions or die */
+       if (!(error_code & 4))
+               goto no_context;
+
+       /* User space => ok to do another page fault */
+       if (is_prefetch(regs, address))
+               return;
+
+       tsk->thread.cr2 = address;
+       tsk->thread.error_code = error_code;
+       tsk->thread.trap_no = 14;
+       info.si_signo = SIGBUS;
+       info.si_errno = 0;
+       info.si_code = BUS_ADRERR;
+       info.si_addr = (void *)address;
+       force_sig_info(SIGBUS, &info, tsk);
+       return;
+
+vmalloc_fault:
+       {
+               /*
+                * Synchronize this task's top level page-table
+                * with the 'reference' page table.
+                *
+                * Do _not_ use "tsk" here. We might be inside
+                * an interrupt in the middle of a task switch..
+                */
+               int index = pgd_index(address);
+               pgd_t *pgd, *pgd_k;
+               pmd_t *pmd, *pmd_k;
+               pte_t *pte_k;
+
+               pgd = index + cur_pgd;
+               pgd_k = init_mm.pgd + index;
+
+               if (!pgd_present(*pgd_k))
+                       goto no_context;
+
+               /*
+                * set_pgd(pgd, *pgd_k); here would be useless on PAE
+                * and redundant with the set_pmd() on non-PAE.
+                */
+
+               pmd = pmd_offset(pgd, address);
+               pmd_k = pmd_offset(pgd_k, address);
+               if (!pmd_present(*pmd_k))
+                       goto no_context;
+               set_pmd(pmd, *pmd_k);
+               xen_flush_page_update_queue(); /* flush PMD update */
+
+               pte_k = pte_offset_kernel(pmd_k, address);
+               if (!pte_present(*pte_k))
+                       goto no_context;
+               return;
+       }
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c
new file mode 100644 (file)
index 0000000..35e8c3e
--- /dev/null
@@ -0,0 +1,264 @@
+/******************************************************************************
+ * xen/i386/mm/hypervisor.c
+ * 
+ * Update page tables via the hypervisor.
+ * 
+ * Copyright (c) 2002, K A Fraser
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm-xen/multicall.h>
+
+/*
+ * This suffices to protect us if we ever move to SMP domains.
+ * Further, it protects us against interrupts. At the very least, this is
+ * required for the network driver which flushes the update queue before
+ * pushing new receive buffers.
+ */
+static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
+
+#if 0
+#define QUEUE_SIZE 2048
+#else
+#define QUEUE_SIZE 1
+#endif
+static mmu_update_t update_queue[QUEUE_SIZE];
+unsigned int mmu_update_queue_idx = 0;
+#define idx mmu_update_queue_idx
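+
+/*
+ * 'idx' counts the entries currently pending in update_queue[]. With
+ * QUEUE_SIZE forced to 1 above, every update is flushed to Xen as soon
+ * as it is queued, trading batching for easier debugging.
+ */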
+
+#if MMU_UPDATE_DEBUG > 0
+page_update_debug_t update_debug_queue[QUEUE_SIZE] = {{0}};
+#undef queue_l1_entry_update
+#undef queue_l2_entry_update
+#endif
+#if MMU_UPDATE_DEBUG > 3
+static void DEBUG_allow_pt_reads(void)
+{
+    pte_t *pte;
+    mmu_update_t update;
+    int i;
+    for ( i = idx-1; i >= 0; i-- )
+    {
+        pte = update_debug_queue[i].ptep;
+        if ( pte == NULL ) continue;
+        update_debug_queue[i].ptep = NULL;
+        update.ptr = virt_to_machine(pte);
+        update.val = update_debug_queue[i].pteval;
+        HYPERVISOR_mmu_update(&update, 1, NULL);
+    }
+}
+static void DEBUG_disallow_pt_read(unsigned long va)
+{
+    pte_t *pte;
+    pmd_t *pmd;
+    pgd_t *pgd;
+    unsigned long pteval;
+    /*
+     * We may fault because of an already outstanding update.
+     * That's okay -- it'll get fixed up in the fault handler.
+     */
+    mmu_update_t update;
+    pgd = pgd_offset_k(va);
+    pmd = pmd_offset(pgd, va);
+    pte = pte_offset_kernel(pmd, va); /* XXXcl */
+    update.ptr = virt_to_machine(pte);
+    pteval = *(unsigned long *)pte;
+    update.val = pteval & ~_PAGE_PRESENT;
+    HYPERVISOR_mmu_update(&update, 1, NULL);
+    update_debug_queue[idx].ptep = pte;
+    update_debug_queue[idx].pteval = pteval;
+}
+#endif
+
+#if MMU_UPDATE_DEBUG > 1
+#undef queue_pt_switch
+#undef queue_tlb_flush
+#undef queue_invlpg
+#undef queue_pgd_pin
+#undef queue_pgd_unpin
+#undef queue_pte_pin
+#undef queue_pte_unpin
+#undef queue_set_ldt
+#endif
+
+
+/*
+ * MULTICALL_flush_page_update_queue:
+ *   This is a version of the flush which queues as part of a multicall.
+ */
+void MULTICALL_flush_page_update_queue(void)
+{
+    unsigned long flags;
+    unsigned int _idx;
+    spin_lock_irqsave(&update_lock, flags);
+    if ( (_idx = idx) != 0 ) 
+    {
+#if MMU_UPDATE_DEBUG > 1
+        if ( idx > 1 )
+            printk("Flushing %d entries from pt update queue\n", idx);
+#endif
+#if MMU_UPDATE_DEBUG > 3
+        DEBUG_allow_pt_reads();
+#endif
+        idx = 0;
+        wmb(); /* Make sure index is cleared first to avoid double updates. */
+        queue_multicall3(__HYPERVISOR_mmu_update, 
+                         (unsigned long)update_queue, 
+                         (unsigned long)_idx, 
+                         (unsigned long)NULL);
+    }
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void __flush_page_update_queue(void)
+{
+    unsigned int _idx = idx;
+#if MMU_UPDATE_DEBUG > 1
+    if ( idx > 1 )
+        printk("Flushing %d entries from pt update queue\n", idx);
+#endif
+#if MMU_UPDATE_DEBUG > 3
+    DEBUG_allow_pt_reads();
+#endif
+    idx = 0;
+    wmb(); /* Make sure index is cleared first to avoid double updates. */
+    if (unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0))
+       panic("Failed to execute MMU updates");
+}
+
+void _flush_page_update_queue(void)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    if ( idx != 0 ) __flush_page_update_queue();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void increment_index(void)
+{
+    idx++;
+    if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
+}
+
+void queue_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+#if MMU_UPDATE_DEBUG > 3
+    DEBUG_disallow_pt_read((unsigned long)ptr);
+#endif
+    update_queue[idx].ptr = virt_to_machine(ptr);
+    update_queue[idx].val = val;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = virt_to_machine(ptr);
+    update_queue[idx].val = val;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pt_switch(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_NEW_BASEPTR;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_tlb_flush(void)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_TLB_FLUSH;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_invlpg(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
+    update_queue[idx].ptr |= ptr & PAGE_MASK;
+    update_queue[idx].val  = MMUEXT_INVLPG;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_pin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_PIN_L2_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_unpin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_pin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_PIN_L1_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_unpin(unsigned long ptr)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = phys_to_machine(ptr);
+    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_set_ldt(unsigned long ptr, unsigned long len)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND | ptr;
+    update_queue[idx].val  = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+    update_queue[idx].val = pfn;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
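+
+#if 0
+/*
+ * Illustrative sketch (not compiled): how callers batch page-table
+ * work through this queue so that many updates cost one hypercall.
+ * The sketch_* name is made up.
+ */
+static void sketch_remap_one_page(pte_t *ptep, unsigned long pteval,
+                                  unsigned long vaddr)
+{
+    queue_l1_entry_update(ptep, pteval); /* queue the PTE write */
+    queue_invlpg(vaddr);                 /* queue the TLB invalidation */
+    flush_page_update_queue();           /* issue one mmu_update call */
+}
+#endif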
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c
new file mode 100644 (file)
index 0000000..e7ab62b
--- /dev/null
@@ -0,0 +1,702 @@
+/*
+ *  linux/arch/i386/mm/init.c
+ *
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/efi.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/dma.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/apic.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm/hypervisor.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+unsigned long highstart_pfn, highend_pfn;
+
+static int do_test_wp_bit(void);
+
+/*
+ * Creates a middle page table and puts a pointer to it in the
+ * given global directory entry. This only returns the gd entry
+ * in non-PAE compilation mode, since the middle layer is folded.
+ */
+static pmd_t * __init one_md_table_init(pgd_t *pgd)
+{
+       pmd_t *pmd_table;
+               
+#ifdef CONFIG_X86_PAE
+       pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+       set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+       if (pmd_table != pmd_offset(pgd, 0)) 
+               BUG();
+#else
+       pmd_table = pmd_offset(pgd, 0);
+#endif
+
+       return pmd_table;
+}
+
+/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry.
+ */
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+       if (pmd_none(*pmd)) {
+               pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+               set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+               flush_page_update_queue();
+               if (page_table != pte_offset_kernel(pmd, 0))
+                       BUG();  
+
+               wrprotect_bootpt((pgd_t *)start_info.pt_base, page_table, 1);
+               return page_table;
+       }
+
+       return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This function initializes a certain range of kernel virtual memory 
+ * with new bootmem page tables, wherever page tables are missing in
+ * the given range.
+ */
+
+/*
+ * NOTE: The pagetables are allocated contiguously in physical space,
+ * so we can cache the place of the first one and move around without 
+ * checking the pgd every time.
+ */
+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       int pgd_idx, pmd_idx;
+       unsigned long vaddr;
+
+       vaddr = start;
+       pgd_idx = pgd_index(vaddr);
+       pmd_idx = pmd_index(vaddr);
+       pgd = pgd_base + pgd_idx;
+
+       for ( ; (pgd_idx < PTRS_PER_PGD_NO_HV) && (vaddr != end); pgd++, pgd_idx++) {
+               if (pgd_none(*pgd)) 
+                       one_md_table_init(pgd);
+
+               pmd = pmd_offset(pgd, vaddr);
+               for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
+                       if (pmd_none(*pmd)) 
+                               one_page_table_init(pmd);
+
+                       vaddr += PMD_SIZE;
+               }
+               pmd_idx = 0;
+       }
+}
+
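+/*
+ * Xen requires any page used as a page table to be mapped read-only in
+ * the guest. wrprotect_bootpt() sets (set != 0) or clears _PAGE_RW on
+ * the entry in 'pgd' that maps 'page', via the hypervisor update queue.
+ */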
+void __init wrprotect_bootpt(pgd_t *pgd, void *page, int set)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long addr;
+
+       addr = (unsigned long)page;
+       pgd += pgd_index(addr);
+       pmd = pmd_offset(pgd, addr);
+       pte = pte_offset_kernel(pmd, addr);
+       if (!pte_present(*pte))
+               return;
+       queue_l1_entry_update(pte, set ? pte_val_ma(*pte) & ~_PAGE_RW :
+           pte_val_ma(*pte) | _PAGE_RW);
+}
+
+static void __init protect_bootpt_entries(pgd_t *spgd, pgd_t *dpgd, int set,
+    int pmdupdate, int pmdset)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       int pgd_idx, pmd_idx;
+
+       for (pgd_idx = 0; pgd_idx < PTRS_PER_PGD_NO_HV; spgd++, pgd_idx++) {
+               pmd = pmd_offset(spgd, 0);
+               if (pmd_none(*pmd))
+                       continue;
+               if (pmdupdate)
+                       wrprotect_bootpt(dpgd, pmd, pmdset);
+               for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
+                       if (cpu_has_pse) {
+                               /* XXX */
+                       } else {
+                               pte = pte_offset_kernel(pmd, 0);
+                               wrprotect_bootpt(dpgd, pte, set);
+                       }
+               }
+       }
+       flush_page_update_queue();
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total 
+ * of max_low_pfn pages, by creating page tables starting from address 
+ * PAGE_OFFSET.
+ */
+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
+{
+       unsigned long pfn;
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       int pgd_idx, pmd_idx, pte_ofs;
+
+       pgd_idx = pgd_index(PAGE_OFFSET);
+       pgd = pgd_base + pgd_idx;
+       pfn = 0;
+       pmd_idx = pmd_index(PAGE_OFFSET);
+       pte_ofs = pte_index(PAGE_OFFSET);
+
+       for (; pgd_idx < PTRS_PER_PGD_NO_HV; pgd++, pgd_idx++) {
+               pmd = one_md_table_init(pgd);
+               if (pfn >= max_low_pfn)
+                       continue;
+               pmd += pmd_idx;
+               for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
+                       /* Map with big pages if possible, otherwise create normal page tables. */
+                       if (cpu_has_pse) {
+                               set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
+                               pfn += PTRS_PER_PTE;
+                       } else {
+                               pte = one_page_table_init(pmd);
+
+                               pte += pte_ofs;
+                               for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++)
+                                       set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+                               pte_ofs = 0;
+                       }
+                       flush_page_update_queue();
+               }
+               pmd_idx = 0;
+       }       
+}
+
+static inline int page_kills_ppro(unsigned long pagenr)
+{
+       if (pagenr >= 0x70000 && pagenr <= 0x7003F)
+               return 1;
+       return 0;
+}
+
+extern int is_available_memory(efi_memory_desc_t *);
+
+static inline int page_is_ram(unsigned long pagenr)
+{
+       int i;
+       unsigned long addr, end;
+
+       if (efi_enabled) {
+               efi_memory_desc_t *md;
+
+               for (i = 0; i < memmap.nr_map; i++) {
+                       md = &memmap.map[i];
+                       if (!is_available_memory(md))
+                               continue;
+                       addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+                       end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
+
+                       if ((pagenr >= addr) && (pagenr < end))
+                               return 1;
+               }
+               return 0;
+       }
+
+       for (i = 0; i < e820.nr_map; i++) {
+
+               if (e820.map[i].type != E820_RAM)       /* not usable memory */
+                       continue;
+               /*
+                *      !!!FIXME!!! Some BIOSen report areas as RAM that
+                *      are not. Notably the 640->1Mb area. We need a sanity
+                *      check here.
+                */
+               addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+               end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
+               if  ((pagenr >= addr) && (pagenr < end))
+                       return 1;
+       }
+       return 0;
+}
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
+
+#define kmap_get_fixmap_pte(vaddr)                                     \
+       pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+
+void __init kmap_init(void)
+{
+       unsigned long kmap_vstart;
+
+       /* cache the first kmap pte */
+       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+
+       kmap_prot = PAGE_KERNEL;
+}
+
+void __init permanent_kmaps_init(pgd_t *pgd_base)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long vaddr;
+
+       vaddr = PKMAP_BASE;
+       page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       pmd = pmd_offset(pgd, vaddr);
+       pte = pte_offset_kernel(pmd, vaddr);
+       pkmap_page_table = pte; 
+}
+
+void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
+{
+       if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
+               ClearPageReserved(page);
+               set_bit(PG_highmem, &page->flags);
+               set_page_count(page, 1);
+               __free_page(page);
+               totalhigh_pages++;
+       } else
+               SetPageReserved(page);
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init set_highmem_pages_init(int bad_ppro) 
+{
+       int pfn;
+       for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
+               one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+       totalram_pages += totalhigh_pages;
+}
+#else
+extern void set_highmem_pages_init(int);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#else
+#define kmap_init() do { } while (0)
+#define permanent_kmaps_init(pgd_base) do { } while (0)
+#define set_highmem_pages_init(bad_ppro) do { } while (0)
+#endif /* CONFIG_HIGHMEM */
+
+unsigned long __PAGE_KERNEL = _PAGE_KERNEL;
+
+#ifndef CONFIG_DISCONTIGMEM
+#define remap_numa_kva() do {} while (0)
+#else
+extern void __init remap_numa_kva(void);
+#endif
+
+static void __init pagetable_init (void)
+{
+       unsigned long vaddr;
+       pgd_t *pgd_base = swapper_pg_dir;
+
+#ifdef CONFIG_X86_PAE
+       int i;
+       /* Init entries of the first-level page table to the zero page */
+       for (i = 0; i < PTRS_PER_PGD; i++)
+               set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#endif
+
+       /* Enable PSE if available */
+       if (cpu_has_pse) {
+               set_in_cr4(X86_CR4_PSE);
+       }
+
+       /* Enable PGE if available */
+       if (cpu_has_pge) {
+               set_in_cr4(X86_CR4_PGE);
+               __PAGE_KERNEL |= _PAGE_GLOBAL;
+       }
+
+       kernel_physical_mapping_init(pgd_base);
+       remap_numa_kva();
+
+       /*
+        * Fixed mappings, only the page table structure has to be
+        * created - mappings will be set by set_fixmap():
+        */
+       vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+       page_table_range_init(vaddr, 0, pgd_base);
+
+       permanent_kmaps_init(pgd_base);
+
+#ifdef CONFIG_X86_PAE
+       /*
+        * Add low memory identity-mappings - SMP needs it when
+        * starting up on an AP from real-mode. In the non-PAE
+        * case we already have these mappings through head.S.
+        * All user-space mappings are explicitly cleared after
+        * SMP startup.
+        */
+       pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
+#endif
+}
+
+#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
+/*
+ * Swap suspend & friends need this for resume because things like the intel-agp
+ * driver might have split up a kernel 4MB mapping.
+ */
+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
+       __attribute__ ((aligned (PAGE_SIZE)));
+
+static inline void save_pg_dir(void)
+{
+       memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
+}
+#else
+static inline void save_pg_dir(void)
+{
+}
+#endif
+
+void zap_low_mappings (void)
+{
+       int i;
+
+       save_pg_dir();
+
+       /*
+        * Zap initial low-memory mappings.
+        *
+        * Note that "pgd_clear()" doesn't do it for
+        * us, because pgd_clear() is a no-op on i386.
+        */
+       for (i = 0; i < USER_PTRS_PER_PGD; i++)
+#ifdef CONFIG_X86_PAE
+               set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
+#else
+               set_pgd(swapper_pg_dir+i, __pgd(0));
+#endif
+       flush_tlb_all();
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init zone_sizes_init(void)
+{
+       unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+       unsigned int high, low;
+       
+       low = max_low_pfn;
+       high = highend_pfn;
+       
+       zones_size[ZONE_NORMAL] = low;
+#ifdef CONFIG_HIGHMEM
+       zones_size[ZONE_HIGHMEM] = high - low;
+#endif
+       free_area_init(zones_size);     
+}
+#else
+extern void zone_sizes_init(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+/*
+ * - write protect new L1 pages in old pgd
+ * - write protect new pgd page in old pgd
+ * - write protect new pgd page in new pgd
+ * - write protect new L1 pages in new pgd
+ * - write protect old L1 pages in new pgd and write protect old pgd in
+ *   new pgd
+ * - pin new pgd
+ * - switch to new pgd
+ * - unpin old pgd
+ * - make old L1 pages and old pgd page writeable in new pgd
+ */
+/*
+ * paging_init() sets up the page tables - note that the first 8MB are
+ * already mapped by head.S.
+ *
+ * This routine also unmaps the page at virtual kernel address 0, so
+ * that we can trap those pesky NULL-reference errors in the kernel.
+ */
+void __init paging_init(void)
+{
+       pagetable_init();
+
+       wrprotect_bootpt((pgd_t *)start_info.pt_base, swapper_pg_dir, 1);
+       wrprotect_bootpt(swapper_pg_dir, swapper_pg_dir, 1);
+       flush_page_update_queue();
+       protect_bootpt_entries(swapper_pg_dir, swapper_pg_dir, 1, 0, 0);
+       protect_bootpt_entries((pgd_t *)start_info.pt_base, swapper_pg_dir,
+           1, 1, 1);
+       queue_pgd_pin(__pa(swapper_pg_dir));
+       flush_page_update_queue();
+       load_cr3(swapper_pg_dir);
+       __flush_tlb_all();
+       queue_pgd_unpin(__pa(start_info.pt_base));
+       flush_page_update_queue();
+       protect_bootpt_entries((pgd_t *)start_info.pt_base, swapper_pg_dir,
+           0, 1, 0);
+       wrprotect_bootpt((pgd_t *)start_info.pt_base, swapper_pg_dir, 0);
+
+#ifdef CONFIG_X86_PAE
+       /*
+        * We will bail out later - printk doesn't work right now so
+        * the user would just see a hanging kernel.
+        */
+       if (cpu_has_pae)
+               set_in_cr4(X86_CR4_PAE);
+#endif
+       __flush_tlb_all();
+
+       kmap_init();
+       zone_sizes_init();
+
+       /* Switch to the real shared_info page, and clear the dummy page. */
+       flush_page_update_queue();
+       set_fixmap_ma(FIX_SHARED_INFO, start_info.shared_info);
+       HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+       memset(empty_zero_page, 0, sizeof(empty_zero_page));
+}
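+
+/*
+ * The sequence above follows the checklist before paging_init(): write
+ * protect the new page tables in both the boot pgd and the new pgd,
+ * pin the new pgd, switch cr3, and only then unpin the boot tables and
+ * make them writeable again.
+ */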
+
+/*
+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
+ * used to involve black magic jumps to work around some nasty CPU bugs,
+ * but fortunately the switch to using exceptions got rid of all that.
+ */
+
+void __init test_wp_bit(void)
+{
+       printk("Checking if this processor honours the WP bit even in supervisor mode... ");
+
+       /* Any page-aligned address will do, the test is non-destructive */
+       __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
+       boot_cpu_data.wp_works_ok = do_test_wp_bit();
+       clear_fixmap(FIX_WP_TEST);
+
+       if (!boot_cpu_data.wp_works_ok) {
+               printk("No.\n");
+#ifdef CONFIG_X86_WP_WORKS_OK
+               panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
+#endif
+       } else {
+               printk("Ok.\n");
+       }
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init set_max_mapnr_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+       highmem_start_page = pfn_to_page(highstart_pfn);
+       max_mapnr = num_physpages = highend_pfn;
+#else
+       max_mapnr = num_physpages = max_low_pfn;
+#endif
+}
+#define __free_all_bootmem() free_all_bootmem()
+#else
+#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
+extern void set_max_mapnr_init(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+static struct kcore_list kcore_mem, kcore_vmalloc; 
+
+void __init mem_init(void)
+{
+       extern int ppro_with_ram_bug(void);
+       int codesize, reservedpages, datasize, initsize;
+       int tmp;
+       int bad_ppro;
+
+#ifndef CONFIG_DISCONTIGMEM
+       if (!mem_map)
+               BUG();
+#endif
+       
+       bad_ppro = ppro_with_ram_bug();
+
+#ifdef CONFIG_HIGHMEM
+       /* check that fixmap and pkmap do not overlap */
+       if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+               printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
+               printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+                               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
+               BUG();
+       }
+#endif
+       set_max_mapnr_init();
+
+#ifdef CONFIG_HIGHMEM
+       high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
+#else
+       high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+#endif
+
+       /* this will put all low memory onto the freelists */
+       totalram_pages += __free_all_bootmem();
+
+       reservedpages = 0;
+       for (tmp = 0; tmp < max_low_pfn; tmp++)
+               /*
+                * Only count reserved RAM pages
+                */
+               if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
+                       reservedpages++;
+
+       set_highmem_pages_init(bad_ppro);
+
+       codesize =  (unsigned long) &_etext - (unsigned long) &_text;
+       datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
+       initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+       kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
+       kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
+                  VMALLOC_END-VMALLOC_START);
+
+       printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+               num_physpages << (PAGE_SHIFT-10),
+               codesize >> 10,
+               reservedpages << (PAGE_SHIFT-10),
+               datasize >> 10,
+               initsize >> 10,
+               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+              );
+
+#ifdef CONFIG_X86_PAE
+       if (!cpu_has_pae)
+               panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
+#endif
+       if (boot_cpu_data.wp_works_ok < 0)
+               test_wp_bit();
+
+       /*
+        * Subtle. SMP is doing its boot stuff late (because it has to
+        * fork idle threads) - but it also needs low mappings for the
+        * protected-mode entry to work. We zap these entries only after
+        * the WP-bit has been tested.
+        */
+#ifndef CONFIG_SMP
+       zap_low_mappings();
+#endif
+}
+
+kmem_cache_t *pgd_cache;
+kmem_cache_t *pmd_cache;
+
+void __init pgtable_cache_init(void)
+{
+       if (PTRS_PER_PMD > 1) {
+               pmd_cache = kmem_cache_create("pmd",
+                                       PTRS_PER_PMD*sizeof(pmd_t),
+                                       PTRS_PER_PMD*sizeof(pmd_t),
+                                       0,
+                                       pmd_ctor,
+                                       NULL);
+               if (!pmd_cache)
+                       panic("pgtable_cache_init(): cannot create pmd cache");
+       }
+       pgd_cache = kmem_cache_create("pgd",
+                               PTRS_PER_PGD*sizeof(pgd_t),
+                               PTRS_PER_PGD*sizeof(pgd_t),
+                               0,
+                               pgd_ctor,
+                               pgd_dtor);
+       if (!pgd_cache)
+               panic("pgtable_cache_init(): Cannot create pgd cache");
+}
+
+/*
+ * This function cannot be __init, since exceptions don't work in that
+ * section.  Put this after the callers, so that it cannot be inlined.
+ */
+static int do_test_wp_bit(void)
+{
+       char tmp_reg;
+       int flag;
+
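+       /*
+        * 'flag' is preloaded with 1 (the "2"(1) input).  The write-back at
+        * label 1 targets the read-only FIX_WP_TEST page: if the CPU
+        * honours WP the write faults and the exception fixup resumes at
+        * label 2 with flag still 1; if the write silently succeeds, the
+        * xorl clears flag to 0.
+        */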
+       __asm__ __volatile__(
+               "       movb %0,%1      \n"
+               "1:     movb %1,%0      \n"
+               "       xorl %2,%2      \n"
+               "2:                     \n"
+               ".section __ex_table,\"a\"\n"
+               "       .align 4        \n"
+               "       .long 1b,2b     \n"
+               ".previous              \n"
+               :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
+                "=q" (tmp_reg),
+                "=r" (flag)
+               :"2" (1)
+               :"memory");
+       
+       return flag;
+}
+
+void free_initmem(void)
+{
+       unsigned long addr;
+
+       addr = (unsigned long)(&__init_begin);
+       for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
+               free_page(addr);
+               totalram_pages++;
+       }
+       printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+       if (start < end)
+               printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+       for (; start < end; start += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
+               free_page(start);
+               totalram_pages++;
+       }
+}
+#endif
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c
new file mode 100644 (file)
index 0000000..be0d311
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ *  linux/arch/i386/mm/pgtable.c
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/spinlock.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/io.h>
+
+void show_mem(void)
+{
+       int total = 0, reserved = 0;
+       int shared = 0, cached = 0;
+       int highmem = 0;
+       struct page *page;
+       pg_data_t *pgdat;
+       unsigned long i;
+
+       printk("Mem-info:\n");
+       show_free_areas();
+       printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+       for_each_pgdat(pgdat) {
+               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+                       page = pgdat->node_mem_map + i;
+                       total++;
+                       if (PageHighMem(page))
+                               highmem++;
+                       if (PageReserved(page))
+                               reserved++;
+                       else if (PageSwapCache(page))
+                               cached++;
+                       else if (page_count(page))
+                               shared += page_count(page) - 1;
+               }
+       }
+       printk("%d pages of RAM\n", total);
+       printk("%d pages of HIGHMEM\n",highmem);
+       printk("%d reserved pages\n",reserved);
+       printk("%d pages shared\n",shared);
+       printk("%d pages swap cached\n",cached);
+}
+
+/*
+ * Associate a virtual page frame with a given physical page frame 
+ * and protection flags for that frame.
+ */ 
+static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       if (pgd_none(*pgd)) {
+               BUG();
+               return;
+       }
+       pmd = pmd_offset(pgd, vaddr);
+       if (pmd_none(*pmd)) {
+               BUG();
+               return;
+       }
+       pte = pte_offset_kernel(pmd, vaddr);
+       /* <pfn,flags> stored as-is, to permit clearing entries */
+       set_pte(pte, pfn_pte(pfn, flags));
+
+       /*
+        * It's enough to flush this one mapping.
+        * (PGE mappings get flushed as well)
+        */
+       __flush_tlb_one(vaddr);
+}
+
+/*
+ * Associate a virtual page frame with a given *machine* page frame and
+ * protection flags for that frame.  Unlike set_pte_pfn(), the pfn here is
+ * a machine frame number and is installed verbatim via pfn_pte_ma(),
+ * without pseudo-physical translation.
+ */
+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
+                          pgprot_t flags)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       if (pgd_none(*pgd)) {
+               BUG();
+               return;
+       }
+       pmd = pmd_offset(pgd, vaddr);
+       if (pmd_none(*pmd)) {
+               BUG();
+               return;
+       }
+       pte = pte_offset_kernel(pmd, vaddr);
+       /* <pfn,flags> stored as-is, to permit clearing entries */
+       set_pte(pte, pfn_pte_ma(pfn, flags));
+
+       /*
+        * It's enough to flush this one mapping.
+        * (PGE mappings get flushed as well)
+        */
+       __flush_tlb_one(vaddr);
+}
+
+/*
+ * Associate a large virtual page frame with a given physical page frame 
+ * and protection flags for that frame. pfn is for the base of the page,
+ * vaddr is what the page gets mapped to - both must be properly aligned. 
+ * The pmd must already be instantiated. Assumes PAE mode.
+ */ 
+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+
+       if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
+               printk ("set_pmd_pfn: vaddr misaligned\n");
+               return; /* BUG(); */
+       }
+       if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
+               printk ("set_pmd_pfn: pfn misaligned\n");
+               return; /* BUG(); */
+       }
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       if (pgd_none(*pgd)) {
+               printk ("set_pmd_pfn: pgd_none\n");
+               return; /* BUG(); */
+       }
+       pmd = pmd_offset(pgd, vaddr);
+       set_pmd(pmd, pfn_pmd(pfn, flags));
+       /*
+        * It's enough to flush this one mapping.
+        * (PGE mappings get flushed as well)
+        */
+       __flush_tlb_one(vaddr);
+}
+
+void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+{
+       unsigned long address = __fix_to_virt(idx);
+
+       if (idx >= __end_of_fixed_addresses) {
+               BUG();
+               return;
+       }
+       set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
+}
+
+void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+{
+       unsigned long address = __fix_to_virt(idx);
+
+       if (idx >= __end_of_fixed_addresses) {
+               BUG();
+               return;
+       }
+       set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       if (pte) {
+               clear_page(pte);
+               __make_page_readonly(pte);
+       }
+       return pte;
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       struct page *pte;
+
+#ifdef CONFIG_HIGHPTE
+       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+#else
+       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+#endif
+       if (pte) {
+               clear_highpage(pte);
+               __make_page_readonly(phys_to_virt(page_to_phys(pte)));
+                               /* XXXcl highmem */
+       }
+       return pte;
+}
+
+void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+{
+       memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+}
+
+/*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+ * in both cached and uncached pgd's; not needed for PAE since the
+ * kernel pmd is shared. If PAE were not to share the pmd a similar
+ * tactic would be needed. This is essentially codepath-based locking
+ * against pageattr.c; it is the unique case in which a valid change
+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
+ * vmalloc faults work because attached pagetables are never freed.
+ * If the locking proves to be non-performant, a ticketing scheme with
+ * checks at dup_mmap(), exec(), and other mmlist addition points
+ * could be used. The locking scheme was chosen on the basis of
+ * manfred's recommendations and having no core impact whatsoever.
+ * -- wli
+ */
+spinlock_t pgd_lock = SPIN_LOCK_UNLOCKED;
+struct page *pgd_list;
+
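+/*
+ * The pgd list is threaded through struct page fields that are otherwise
+ * unused for pagetable pages: page->index holds the next element and
+ * page->private points back at the previous element's "next" slot, giving
+ * O(1) unlink without a full list head.
+ */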
+static inline void pgd_list_add(pgd_t *pgd)
+{
+       struct page *page = virt_to_page(pgd);
+       page->index = (unsigned long)pgd_list;
+       if (pgd_list)
+               pgd_list->private = (unsigned long)&page->index;
+       pgd_list = page;
+       page->private = (unsigned long)&pgd_list;
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+       struct page *next, **pprev, *page = virt_to_page(pgd);
+       next = (struct page *)page->index;
+       pprev = (struct page **)page->private;
+       *pprev = next;
+       if (next)
+               next->private = (unsigned long)pprev;
+}
+
+void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+{
+       unsigned long flags;
+
+       if (PTRS_PER_PMD == 1)
+               spin_lock_irqsave(&pgd_lock, flags);
+
+       memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+                       swapper_pg_dir + USER_PTRS_PER_PGD,
+                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+       if (PTRS_PER_PMD > 1)
+               goto out;
+
+       pgd_list_add(pgd);
+       spin_unlock_irqrestore(&pgd_lock, flags);
+       memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+ out:
+       __make_page_readonly(pgd);
+       queue_pgd_pin(__pa(pgd));
+       flush_page_update_queue();
+}
+
+/* never called when PTRS_PER_PMD > 1 */
+void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+{
+       unsigned long flags; /* can be called from interrupt context */
+
+       queue_pgd_unpin(__pa(pgd));
+       __make_page_writeable(pgd);
+       flush_page_update_queue();
+
+       if (PTRS_PER_PMD > 1)
+               return;
+
+       spin_lock_irqsave(&pgd_lock, flags);
+       pgd_list_del(pgd);
+       spin_unlock_irqrestore(&pgd_lock, flags);
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       int i;
+       pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
+
+       if (PTRS_PER_PMD == 1 || !pgd)
+               return pgd;
+
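+       /*
+        * NB: the '1 +' below sets the _PAGE_PRESENT bit in each pgd
+        * entry; the free paths recover the pmd pointer with pgd_val() - 1.
+        */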
+       for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+               pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+               if (!pmd)
+                       goto out_oom;
+               set_pgd(&pgd[i], __pgd(1 + __pa((u64)((u32)pmd))));
+       }
+       return pgd;
+
+out_oom:
+       for (i--; i >= 0; i--)
+               kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+       kmem_cache_free(pgd_cache, pgd);
+       return NULL;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+       int i;
+
+       /* in the PAE case user pgd entries are overwritten before usage */
+       if (PTRS_PER_PMD > 1)
+               for (i = 0; i < USER_PTRS_PER_PGD; ++i)
+                       kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+       /* in the non-PAE case, clear_page_tables() clears user pgd entries */
+       kmem_cache_free(pgd_cache, pgd);
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/kernel/Makefile b/linux-2.6.7-xen-sparse/arch/xen/kernel/Makefile
new file mode 100644 (file)
index 0000000..4f164b0
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux kernel.
+#
+
+XENARCH        := $(subst ",,$(CONFIG_XENARCH))
+
+$(obj)/vmlinux.lds.s: arch/$(ARCH)/$(XENARCH)/kernel/vmlinux.lds.s
+       @ln -fsn $(srctree)/arch/$(ARCH)/$(XENARCH)/kernel/vmlinux.lds.s $@
+
+extra-y += vmlinux.lds.s
+
+obj-y  := ctrl_if.o process.o reboot.o empty.o
diff --git a/linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c b/linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c
new file mode 100644 (file)
index 0000000..0a2449d
--- /dev/null
@@ -0,0 +1,478 @@
+/******************************************************************************
+ * ctrl_if.c
+ * 
+ * Management functions for special interface to the domain controller.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/evtchn.h>
+
+#if 0
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+                           __FILE__ , __LINE__ , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+/*
+ * Only used by initial domain which must create its own control-interface
+ * event channel. This value is picked up by the user-space domain controller
+ * via an ioctl.
+ */
+int initdom_ctrlif_domcontroller_port = -1;
+
+static int        ctrl_if_evtchn;
+static int        ctrl_if_irq;
+static spinlock_t ctrl_if_lock;
+
+static struct irqaction ctrl_if_irq_action;
+
+static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
+static CONTROL_RING_IDX ctrl_if_rx_req_cons;
+
+/* Incoming message requests. */
+    /* Primary message type -> message handler. */
+static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
+    /* Primary message type -> callback in process context? */
+static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
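+    /*
+     * NB: one bit per message type needs only 256/BITS_PER_LONG longs;
+     * dividing by sizeof(unsigned long) over-allocates, harmlessly - only
+     * the first 256 bits are ever used.
+     */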
+    /* Is it late enough during bootstrap to use schedule_task()? */
+static int safe_to_schedule_task;
+#if 0                           /* XXXcl tq */
+    /* Passed to schedule_task(). */
+static struct tq_struct ctrl_if_rxmsg_deferred_tq;
+#else
+static struct work_struct ctrl_if_rxmsg_deferred_work;
+#endif
+    /* Queue up messages to be handled in process context. */
+static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
+static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
+static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
+
+/* Incoming message responses: message identifier -> message handler/id. */
+static struct {
+    ctrl_msg_handler_t fn;
+    unsigned long      id;
+} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
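+/*
+ * A msg->id of 0xFF means "no response handler"; any other value indexes
+ * this table (see ctrl_if_send_message_noblock() and the tx tasklet).
+ */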
+
+#if 0                           /* XXXcl tq */
+static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
+#else
+static struct workqueue_struct *ctrl_if_tx_wq = NULL;
+#endif
+static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
+static void __ctrl_if_tx_tasklet(unsigned long data);
+static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
+
+static void __ctrl_if_rx_tasklet(unsigned long data);
+static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
+
+#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
+#define TX_FULL(_c)   \
+    (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
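+
+/*
+ * The control rings live in the upper 2kB of the shared-info page.  The
+ * producer/consumer indices are free-running and only reduced modulo the
+ * ring size (via MASK_CONTROL_IDX()) when indexing, so "full" is simply a
+ * producer/consumer distance of CONTROL_RING_SIZE.
+ */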
+
+static void ctrl_if_notify_controller(void)
+{
+    notify_via_evtchn(ctrl_if_evtchn);
+}
+
+static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
+{
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+static void __ctrl_if_tx_tasklet(unsigned long data)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+    ctrl_msg_t   *msg;
+    int           was_full = TX_FULL(ctrl_if);
+
+    while ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+    {
+        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
+
+        DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", 
+                ctrl_if_tx_resp_cons,
+                ctrl_if->tx_resp_prod,
+                msg->type, msg->subtype);
+
+        /* Execute the callback handler, if one was specified. */
+        if ( msg->id != 0xFF )
+        {
+            (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
+                msg, ctrl_if_txmsg_id_mapping[msg->id].id);
+            smp_mb(); /* Execute, /then/ free. */
+            ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
+        }
+
+        /*
+         * Step over the message in the ring /after/ finishing reading it. As 
+         * soon as the index is updated, the message may get blown away.
+         */
+        smp_mb();
+        ctrl_if_tx_resp_cons++;
+    }
+
+    if ( was_full && !TX_FULL(ctrl_if) )
+    {
+        wake_up(&ctrl_if_tx_wait);
+#if 0                           /* XXXcl tq */
+        run_task_queue(&ctrl_if_tx_tq);
+#endif
+    }
+}
+
+static void __ctrl_if_rxmsg_deferred(void *unused)
+{
+    ctrl_msg_t *msg;
+
+    while ( ctrl_if_rxmsg_deferred_cons != ctrl_if_rxmsg_deferred_prod )
+    {
+        msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
+            ctrl_if_rxmsg_deferred_cons++)];
+        (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
+    }
+}
+
+static void __ctrl_if_rx_tasklet(unsigned long data)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+    ctrl_msg_t    msg, *pmsg;
+
+    while ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+    {
+        pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
+        memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
+
+        DPRINTK("Rx-Req %u/%u :: %d/%d\n", 
+                ctrl_if_rx_req_cons-1,
+                ctrl_if->rx_req_prod,
+                msg.type, msg.subtype);
+
+        if ( msg.length != 0 )
+            memcpy(msg.msg, pmsg->msg, msg.length);
+
+        if ( test_bit(msg.type, (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
+        {
+            pmsg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
+                ctrl_if_rxmsg_deferred_prod++)];
+            memcpy(pmsg, &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+#if 0                           /* XXXcl tq */
+            schedule_task(&ctrl_if_rxmsg_deferred_tq);
+#else
+            schedule_work(&ctrl_if_rxmsg_deferred_work);
+#endif
+        }
+        else
+        {
+            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
+        }
+    }
+}
+
+static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+
+    if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+        tasklet_schedule(&ctrl_if_tx_tasklet);
+
+    if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+        tasklet_schedule(&ctrl_if_rx_tasklet);
+
+    return IRQ_HANDLED;
+}
+
+int ctrl_if_send_message_noblock(
+    ctrl_msg_t *msg, 
+    ctrl_msg_handler_t hnd,
+    unsigned long id)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+    unsigned long flags;
+    int           i;
+
+    spin_lock_irqsave(&ctrl_if_lock, flags);
+
+    if ( TX_FULL(ctrl_if) )
+    {
+        spin_unlock_irqrestore(&ctrl_if_lock, flags);
+        return -EAGAIN;
+    }
+
+    msg->id = 0xFF;
+    if ( hnd != NULL )
+    {
+        for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
+            continue;
+        ctrl_if_txmsg_id_mapping[i].fn = hnd;
+        ctrl_if_txmsg_id_mapping[i].id = id;
+        msg->id = i;
+    }
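+    /*
+     * NB: the unbounded slot search above cannot overrun: at most
+     * CONTROL_RING_SIZE messages are in flight (TX_FULL is checked under
+     * the lock) and the mapping table has exactly CONTROL_RING_SIZE slots.
+     */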
+
+    DPRINTK("Tx-Req %u/%u :: %d/%d\n", 
+            ctrl_if->tx_req_prod, 
+            ctrl_if_tx_resp_cons,
+            msg->type, msg->subtype);
+
+    memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)], 
+           msg, sizeof(*msg));
+    wmb(); /* Write the message before letting the controller peek at it. */
+    ctrl_if->tx_req_prod++;
+
+    spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+    ctrl_if_notify_controller();
+
+    return 0;
+}
+
+int ctrl_if_send_message_block(
+    ctrl_msg_t *msg, 
+    ctrl_msg_handler_t hnd, 
+    unsigned long id,
+    long wait_state)
+{
+    DECLARE_WAITQUEUE(wait, current);
+    int rc;
+
+    /* Fast path. */
+    if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+        return rc;
+
+    add_wait_queue(&ctrl_if_tx_wait, &wait);
+
+    for ( ; ; )
+    {
+        set_current_state(wait_state);
+
+        if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+            break;
+
+        rc = -ERESTARTSYS;
+        if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
+            break;
+
+        schedule();
+    }
+
+    set_current_state(TASK_RUNNING);
+    remove_wait_queue(&ctrl_if_tx_wait, &wait);
+
+    return rc;
+}
+
+int ctrl_if_enqueue_space_callback(struct work_struct *work)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+
+    /* Fast path. */
+    if ( !TX_FULL(ctrl_if) )
+        return 0;
+
+#if 0                           /* XXXcl tq */
+    (void)queue_task(task, &ctrl_if_tx_tq);
+#else
+    if (ctrl_if_tx_wq)
+        (void)queue_work(ctrl_if_tx_wq, work);
+    else
+        return 1;
+#endif
+
+    /*
+     * We may race with execution of the queued work item, so re-check the
+     * ring status before returning: even if the work has not yet run, a
+     * non-full ring is still correctly reported as 'not full'.
+     */
+    smp_mb();
+    return TX_FULL(ctrl_if);
+}
+
+void ctrl_if_send_response(ctrl_msg_t *msg)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+    unsigned long flags;
+    ctrl_msg_t   *dmsg;
+
+    /*
+     * NB. The response may be the original request message, modified in-place.
+     * In this situation we may have src==dst, so no copying is required.
+     */
+    spin_lock_irqsave(&ctrl_if_lock, flags);
+
+    DPRINTK("Tx-Rsp %u :: %d/%d\n", 
+            ctrl_if->rx_resp_prod, 
+            msg->type, msg->subtype);
+
+    dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
+    if ( dmsg != msg )
+        memcpy(dmsg, msg, sizeof(*msg));
+
+    wmb(); /* Write the message before letting the controller peek at it. */
+    ctrl_if->rx_resp_prod++;
+
+    spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+    ctrl_if_notify_controller();
+}
+
+int ctrl_if_register_receiver(
+    u8 type, 
+    ctrl_msg_handler_t hnd, 
+    unsigned int flags)
+{
+    unsigned long _flags;
+    int inuse;
+
+    spin_lock_irqsave(&ctrl_if_lock, _flags);
+
+    inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
+
+    if ( inuse )
+    {
+        printk(KERN_INFO "Receiver %p already established for control "
+               "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
+    }
+    else
+    {
+        ctrl_if_rxmsg_handler[type] = hnd;
+        clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+        if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
+        {
+            set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+            if ( !safe_to_schedule_task )
+                BUG();
+        }
+    }
+
+    spin_unlock_irqrestore(&ctrl_if_lock, _flags);
+
+    return !inuse;
+}
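+
+/*
+ * Typical usage (cf. the block-device frontend): a driver registers a
+ * receiver for its message class and then sends requests, e.g.:
+ *
+ *     ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
+ *                               CALLBACK_IN_BLOCKING_CONTEXT);
+ *     ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ */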
+
+void ctrl_if_unregister_receiver(u8 type, ctrl_msg_handler_t hnd)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&ctrl_if_lock, flags);
+
+    if ( ctrl_if_rxmsg_handler[type] != hnd )
+        printk(KERN_INFO "Receiver %p is not registered for control "
+               "messages of type %d.\n", hnd, type);
+    else
+        ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
+
+    spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+    /* Ensure that @hnd will not be executed after this function returns. */
+    tasklet_unlock_wait(&ctrl_if_rx_tasklet);
+}
+
+void ctrl_if_suspend(void)
+{
+    free_irq(ctrl_if_irq, NULL);
+    unbind_evtchn_from_irq(ctrl_if_evtchn);
+}
+
+/*
+ * Reset the control interface progress pointers.
+ * Marks the queues empty if 'clear' is non-zero.
+ */
+void ctrl_if_reset(int clear)
+{
+    control_if_t *ctrl_if = get_ctrl_if();
+
+    if ( clear )
+        *ctrl_if = (control_if_t){};
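+
+    /*
+     * NB: rx_req_cons is rewound to rx_resp_prod rather than rx_req_prod,
+     * so a request that was received but not yet answered is processed
+     * again after a resume.
+     */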
+    ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
+    ctrl_if_rx_req_cons  = ctrl_if->rx_resp_prod;
+}
+
+void ctrl_if_resume(void)
+{
+    if ( start_info.flags & SIF_INITDOMAIN )
+    {
+        /*
+         * The initial domain must create its own domain-controller link.
+         * The controller is probably not running at this point, but will
+         * pick up its end of the event channel later, via an ioctl (see
+         * initdom_ctrlif_domcontroller_port above).
+         */
+        evtchn_op_t op;
+        op.cmd = EVTCHNOP_bind_interdomain;
+        op.u.bind_interdomain.dom1 = DOMID_SELF;
+        op.u.bind_interdomain.dom2 = DOMID_SELF;
+        if ( HYPERVISOR_event_channel_op(&op) != 0 )
+            BUG();
+        start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
+        initdom_ctrlif_domcontroller_port   = op.u.bind_interdomain.port2;
+    }
+
+    ctrl_if_reset(0);
+
+    ctrl_if_evtchn = start_info.domain_controller_evtchn;
+    ctrl_if_irq    = bind_evtchn_to_irq(ctrl_if_evtchn);
+
+#define SA_STATIC_ACTION 0x01000000 /* so that free_irq() doesn't do kfree() */
+    memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
+    ctrl_if_irq_action.handler = ctrl_if_interrupt;
+    ctrl_if_irq_action.name    = "ctrl-if";
+    ctrl_if_irq_action.flags   = SA_STATIC_ACTION;
+    (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
+}
+
+void __init ctrl_if_init(void)
+{
+    int i;
+
+    for ( i = 0; i < 256; i++ )
+        ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
+#if 0                           /* XXXcl tq */
+    ctrl_if_rxmsg_deferred_tq.routine = __ctrl_if_rxmsg_deferred;
+#else
+    INIT_WORK(&ctrl_if_rxmsg_deferred_work, (void *)__ctrl_if_rxmsg_deferred,
+              NULL);
+#endif
+
+    spin_lock_init(&ctrl_if_lock);
+
+    ctrl_if_reset(1);
+    ctrl_if_resume();
+}
+
+
+/* This is called after it is safe to call schedule_task(). */
+static int __init ctrl_if_late_setup(void)
+{
+    safe_to_schedule_task = 1;
+    ctrl_if_tx_wq = create_workqueue("ctrl_if_tx");
+    if (ctrl_if_tx_wq == NULL)
+        return 1;                 /* XXX */
+    return 0;
+}
+__initcall(ctrl_if_late_setup);
+
+
+/*
+ * !! The following are DANGEROUS FUNCTIONS !!
+ * Use with care [for example, see xencons_force_flush()].
+ */
+
+int ctrl_if_transmitter_empty(void)
+{
+    return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
+}
+
+void ctrl_if_discard_responses(void)
+{
+    ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
+}
+
diff --git a/linux-2.6.7-xen-sparse/arch/xen/kernel/empty.c b/linux-2.6.7-xen-sparse/arch/xen/kernel/empty.c
new file mode 100644 (file)
index 0000000..90a8486
--- /dev/null
@@ -0,0 +1,44 @@
+
+#include <linux/string.h>
+#include <asm/hypervisor.h>
+
+#if 0
+static __inline__ int HYPERVISOR_console_write(const char *str, int count)
+{
+       int ret;
+       __asm__ __volatile__ (
+               TRAP_INSTR
+               : "=a" (ret) : "0" (__HYPERVISOR_console_write), 
+               "b" (str), "c" (count) : "memory" );
+
+
+       return ret;
+}
+#endif
+
+#if 1
+void
+xen_puts(const char *str)
+{
+
+       (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(str), (char *)str);
+}
+
+asmlinkage int CLPRINTK(const char *fmt, ...)
+{
+       va_list args;
+       int printk_len;
+       static char printk_buf[1024+1];
+    
+       /* Emit the output into the temporary buffer */
+       va_start(args, fmt);
+       printk_len = vsnprintf(printk_buf, sizeof(printk_buf)-1, fmt, args);
+       va_end(args);
+
+       printk_buf[printk_len] = 0;
+       /* Send the processed output directly to Xen. */
+       (void)HYPERVISOR_console_io(CONSOLEIO_write, printk_len, printk_buf);
+
+       return 0;
+}
+#endif
diff --git a/linux-2.6.7-xen-sparse/arch/xen/kernel/process.c b/linux-2.6.7-xen-sparse/arch/xen/kernel/process.c
new file mode 100644 (file)
index 0000000..a6930ee
--- /dev/null
@@ -0,0 +1,20 @@
+
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/platform.h>
+#include <linux/pm.h>
+
+
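+/*
+ * Idle hook: if nothing is runnable, yield the (virtual) CPU back to Xen
+ * rather than spinning.  The commented-out local_irq_* calls mark where a
+ * race between the need_resched() test and the yield would normally be
+ * closed.
+ */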
+void xen_cpu_idle (void)
+{
+       // local_irq_disable();
+       if (need_resched()) {
+               // local_irq_enable();
+               return;
+       }
+       // local_irq_enable();
+       HYPERVISOR_yield();
+}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c b/linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c
new file mode 100644 (file)
index 0000000..17cc761
--- /dev/null
@@ -0,0 +1,39 @@
+
+#include <linux/module.h>
+#include <asm/hypervisor.h>
+
+int reboot_thru_bios = 0;      /* for dmi_scan.c */
+
+void machine_restart(char * __unused)
+{
+       /* We really want to get pending console data out before we die. */
+       extern void xencons_force_flush(void);
+       xencons_force_flush();
+       HYPERVISOR_reboot();
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void machine_halt(void)
+{
+       /* We really want to get pending console data out before we die. */
+       extern void xencons_force_flush(void);
+       xencons_force_flush();
+       for ( ; ; ) /* loop without wasting cpu cycles */
+       {
+               HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_pending = 0;
+               HYPERVISOR_block();
+       }
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+       /* We really want to get pending console data out before we die. */
+       extern void xencons_force_flush(void);
+       xencons_force_flush();
+       HYPERVISOR_shutdown();
+}
+
+EXPORT_SYMBOL(machine_power_off);
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/Makefile b/linux-2.6.7-xen-sparse/drivers/xen/Makefile
new file mode 100644 (file)
index 0000000..9124053
--- /dev/null
@@ -0,0 +1,6 @@
+
+
+obj-y  += evtchn/
+obj-y  += console/
+obj-y  += block/
+obj-y  += net/
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/block/Kconfig b/linux-2.6.7-xen-sparse/drivers/xen/block/Kconfig
new file mode 100644 (file)
index 0000000..edde837
--- /dev/null
@@ -0,0 +1,6 @@
+
+config XENBLOCK
+       tristate "Block device driver"
+       depends on ARCH_XEN
+       help
+         Block device driver for Xen
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/block/Makefile b/linux-2.6.7-xen-sparse/drivers/xen/block/Makefile
new file mode 100644 (file)
index 0000000..de77b96
--- /dev/null
@@ -0,0 +1,3 @@
+
+obj-y  := vbd.o block.o
+
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/block/block.c b/linux-2.6.7-xen-sparse/drivers/xen/block/block.c
new file mode 100644 (file)
index 0000000..db9c6b4
--- /dev/null
@@ -0,0 +1,653 @@
+/******************************************************************************
+ * block.c
+ * 
+ * XenLinux virtual block-device driver.
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ */
+
+#include "block.h"
+#include <linux/cdrom.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
+#include <asm-xen/ctrl_if.h>
+
+typedef unsigned char byte; /* from linux/ide.h */
+
+#define BLKIF_STATE_CLOSED       0
+#define BLKIF_STATE_DISCONNECTED 1
+#define BLKIF_STATE_CONNECTED    2
+static unsigned int blkif_state = BLKIF_STATE_CLOSED;
+static unsigned int blkif_evtchn, blkif_irq;
+
+static int blkif_control_rsp_valid;
+static blkif_response_t blkif_control_rsp;
+
+static blkif_ring_t *blk_ring;
+static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
+static BLKIF_RING_IDX req_prod;  /* Private request producer.         */
+
+static blkif_ring_t *blk_ring_rec; /* Private copy of requests, used for
+                                    * recovery.  Responses not stored here. */
+static BLKIF_RING_IDX resp_cons_rec; /* Copy of response consumer, used for
+                                      * recovery */
+static int recovery = 0;           /* "Recovery in progress" flag.  Protected
+                                    * by the blkif_io_lock */
+
+/* We plug the I/O ring if the driver is suspended or if the ring is full. */
+#define        BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
+                        (blkif_state != BLKIF_STATE_CONNECTED))
+
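+/*
+ * As with the control interface, req_prod and resp_cons are free-running
+ * indices that are masked with MASK_BLKIF_IDX() on access, so a
+ * producer/consumer distance of BLKIF_RING_SIZE means the ring is full.
+ */
+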
+/*
+ * Request queues with outstanding work, but ring is currently full.
+ * We need no special lock here, as we always access this with the
+ * blkif_io_lock held. We only need a small maximum list.
+ */
+#define MAX_PENDING 8
+static request_queue_t *pending_queues[MAX_PENDING];
+static int nr_pending;
+
+static inline void flush_requests(void)
+{
+
+        blk_ring->req_prod = req_prod;
+
+        notify_via_evtchn(blkif_evtchn);
+}
+
+
+#if 0
+/*
+ * blkif_update_int/update_vbds_task - handle VBD update events.
+ *  Schedule a task for keventd to run, which will update the VBDs and perform 
+ *  the corresponding updates to our view of VBD state.
+ */
+static struct tq_struct update_tq;
+static void update_vbds_task(void *unused)
+{ 
+    xlvbd_update_vbds();
+}
+#endif
+
+
+int blkif_open(struct inode *inode, struct file *filep)
+{
+       struct gendisk *gd = inode->i_bdev->bd_disk;
+       struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+       /* Update of usage count is protected by per-device semaphore. */
+       di->mi->usage++;
+
+       return 0;
+}
+
+
+int blkif_release(struct inode *inode, struct file *filep)
+{
+       struct gendisk *gd = inode->i_bdev->bd_disk;
+       struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+       /*
+        * When usage drops to zero it may allow more VBD updates to occur.
+        * Update of usage count is protected by a per-device semaphore.
+        */
+       if (--di->mi->usage == 0) {
+#if 0
+               update_tq.routine = update_vbds_task;
+               schedule_task(&update_tq);
+#endif
+       }
+
+       return 0;
+}
+
+
+int blkif_ioctl(struct inode *inode, struct file *filep,
+                          unsigned command, unsigned long argument)
+{
+       /*  struct gendisk *gd = inode->i_bdev->bd_disk; */
+
+       DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+           command, (long)argument, inode->i_rdev); 
+  
+       switch (command) {
+
+       case HDIO_GETGEO:
+               /* return ENOSYS to use defaults */
+               return -ENOSYS;
+
+       default:
+               printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
+                      command);
+               return -ENOSYS;
+       }
+
+       return 0;
+}
+
+#if 0
+/* check media change: should probably do something here in some cases :-) */
+int blkif_check(kdev_t dev)
+{
+    DPRINTK("blkif_check\n");
+    return 0;
+}
+
+int blkif_revalidate(kdev_t dev)
+{
+    struct block_device *bd;
+    struct gendisk *gd;
+    xen_block_t *disk;
+    unsigned long capacity;
+    int i, rc = 0;
+    
+    if ( (bd = bdget(dev)) == NULL )
+        return -EINVAL;
+
+    /*
+     * Update of partition info, and check of usage count, is protected
+     * by the per-block-device semaphore.
+     */
+    down(&bd->bd_sem);
+
+    if ( ((gd = get_gendisk(dev)) == NULL) ||
+         ((disk = xldev_to_xldisk(dev)) == NULL) ||
+         ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    if ( disk->usage > 1 )
+    {
+        rc = -EBUSY;
+        goto out;
+    }
+
+    /* Only reread partition table if VBDs aren't mapped to partitions. */
+    if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
+    {
+        for ( i = gd->max_p - 1; i >= 0; i-- )
+        {
+            invalidate_device(dev+i, 1);
+            gd->part[MINOR(dev+i)].start_sect = 0;
+            gd->part[MINOR(dev+i)].nr_sects   = 0;
+            gd->sizes[MINOR(dev+i)]           = 0;
+        }
+
+        grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
+    }
+
+ out:
+    up(&bd->bd_sem);
+    bdput(bd);
+    return rc;
+}
+#endif
+
+
+/*
+ * blkif_queue_request
+ *
+ * request block io 
+ * 
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ *   virtual address in the guest os.
+ */
+static int blkif_queue_request(struct request *req)
+{
+       struct xlbd_disk_info *di =
+               (struct xlbd_disk_info *)req->rq_disk->private_data;
+       unsigned long buffer_ma;
+       blkif_request_t *ring_req;
+       struct bio *bio;
+       struct bio_vec *bvec;
+       int idx, s;
+        unsigned int fsect, lsect;
+
+        if (unlikely(blkif_state != BLKIF_STATE_CONNECTED))
+                return 1;
+
+       /* Fill out a communications ring structure. */
+       ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
+       ring_req->id = (unsigned long)req;
+       ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
+               BLKIF_OP_READ;
+       ring_req->sector_number = (blkif_sector_t)req->sector;
+       ring_req->device = di->xd_device;
+
+       s = 0;
+       ring_req->nr_segments = 0;
+       rq_for_each_bio(bio, req) {
+               bio_for_each_segment(bvec, bio, idx) {
+                       buffer_ma =
+                                phys_to_machine(page_to_phys(bvec->bv_page));
+                       if (unlikely((buffer_ma & ((1<<9)-1)) != 0))
+                               BUG();
+
+                        fsect = bvec->bv_offset >> 9;
+                        lsect = fsect + (bvec->bv_len >> 9) - 1;
+                        if (unlikely(lsect > 7))
+                                BUG();
+
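+                       /*
+                        * Each segment entry packs the frame's machine
+                        * address with the first and last 512-byte sector
+                        * numbers within the frame: ma | (fsect<<3) | lsect.
+                        */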
+                       ring_req->frame_and_sects[ring_req->nr_segments++] =
+                               buffer_ma | (fsect << 3) | lsect;
+                       s += bvec->bv_len >> 9;
+               }
+       }
+
+       req_prod++;
+
+        /* Keep a private copy so we can reissue requests when recovering. */
+        blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod)].req =
+                *ring_req;
+        blk_ring_rec->req_prod++;
+
+        return 0;
+}
+
+/*
+ * do_blkif_request
+ *  read a block; request is in a request queue
+ */
+void do_blkif_request(request_queue_t *rq)
+{
+       struct request *req;
+       int queued;
+
+       DPRINTK("Entered do_blkif_request\n"); 
+
+       queued = 0;
+
+       while ((req = elv_next_request(rq)) != NULL) {
+               if (!blk_fs_request(req)) {
+                       end_request(req, 0);
+                       continue;
+               }
+
+               if (BLKIF_RING_FULL) {
+                       blk_stop_queue(rq);
+                       break;
+               }
+               DPRINTK("do_blkif_request %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
+                   req, req->cmd, req->sector, req->current_nr_sectors,
+                   req->nr_sectors, req->buffer,
+                   rq_data_dir(req) ? "write" : "read");
+                blkdev_dequeue_request(req);
+               if (blkif_queue_request(req)) {
+                        blk_stop_queue(rq);
+                        break;
+                }
+               queued++;
+       }
+
+       if (queued != 0)
+               flush_requests();
+}
+
+
+static void kick_pending_request_queues(void)
+{
+    /* We kick pending request queues if the ring is reasonably empty. */
+    if ( (nr_pending != 0) && 
+         ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
+    {
+        /* Attempt to drain the queue, but bail if the ring becomes full. */
+        while ( (nr_pending != 0) && !BLKIF_RING_FULL )
+            do_blkif_request(pending_queues[--nr_pending]);
+    }
+}
+
+
+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+       struct request *req;
+       blkif_response_t *bret;
+       BLKIF_RING_IDX i; 
+       unsigned long flags; 
+
+       spin_lock_irqsave(&blkif_io_lock, flags);     
+
+        if (unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery)) {
+                printk("Bailed out\n");
+        
+                spin_unlock_irqrestore(&blkif_io_lock, flags);
+                return IRQ_HANDLED;
+        }
+
+       for (i = resp_cons; i != blk_ring->resp_prod; i++) {
+               bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
+               switch (bret->operation) {
+               case BLKIF_OP_READ:
+               case BLKIF_OP_WRITE:
+                       if (unlikely(bret->status != BLKIF_RSP_OKAY))
+                               DPRINTK("Bad return from blkdev data request: %lx\n",
+                                   bret->status);
+                       req = (struct request *)bret->id;
+                        /* XXXcl pass up status */
+                       if (unlikely(end_that_request_first(req, 1,
+                           req->hard_nr_sectors)))
+                               BUG();
+
+                       end_that_request_last(req);
+                       break;
+                case BLKIF_OP_PROBE:
+                        memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+                        blkif_control_rsp_valid = 1;
+                        break;
+               default:
+                       BUG();
+               }
+       }
+    
+       resp_cons = i;
+        resp_cons_rec = i;
+
+       if (xlbd_blk_queue &&
+            test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags)) {
+               blk_start_queue(xlbd_blk_queue);
+               /* XXXcl call to request_fn should not be needed but
+                * we get stuck without...  needs investigating
+                */
+               xlbd_blk_queue->request_fn(xlbd_blk_queue);
+       }
+
+       spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+
+void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
+{
+    unsigned long flags;
+
+ retry:
+    while ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    spin_lock_irqsave(&blkif_io_lock, flags);
+    if ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+    {
+        spin_unlock_irqrestore(&blkif_io_lock, flags);
+        goto retry;
+    }
+
+    memcpy(&blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req, req, sizeof(*req));
+    memcpy(&blk_ring_rec->ring[MASK_BLKIF_IDX(blk_ring_rec->req_prod++)].req,
+           req, sizeof(*req));
+    req_prod++;
+    flush_requests();
+
+    spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+    while ( !blkif_control_rsp_valid )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    memcpy(rsp, &blkif_control_rsp, sizeof(*rsp));
+    blkif_control_rsp_valid = 0;
+}
+
+
+static void blkif_status_change(blkif_fe_interface_status_changed_t *status)
+{
+    ctrl_msg_t                   cmsg;
+    blkif_fe_interface_connect_t up;
+
+    if ( status->handle != 0 )
+    {
+        printk(KERN_WARNING "Status change on unsupported blkif %d\n",
+               status->handle);
+        return;
+    }
+
+    switch ( status->status )
+    {
+    case BLKIF_INTERFACE_STATUS_DESTROYED:
+        printk(KERN_WARNING "Unexpected blkif-DESTROYED message in state %d\n",
+               blkif_state);
+        break;
+
+    case BLKIF_INTERFACE_STATUS_DISCONNECTED:
+        if ( blkif_state != BLKIF_STATE_CLOSED )
+        {
+            printk(KERN_WARNING "Unexpected blkif-DISCONNECTED message"
+                   " in state %d\n", blkif_state);
+
+            printk(KERN_INFO "VBD driver recovery in progress\n");
+            
+            /* Prevent new requests being issued until we fix things up. */
+            spin_lock_irq(&blkif_io_lock);
+            recovery = 1;
+            blkif_state = BLKIF_STATE_DISCONNECTED;
+            spin_unlock_irq(&blkif_io_lock);
+
+            /* Free resources associated with old device channel. */
+            free_page((unsigned long)blk_ring);
+            free_irq(blkif_irq, NULL);
+            unbind_evtchn_from_irq(blkif_evtchn);
+        }
+
+        /* Move from CLOSED to DISCONNECTED state. */
+        blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
+        blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
+        blkif_state  = BLKIF_STATE_DISCONNECTED;
+
+        /* Construct an interface-CONNECT message for the domain controller. */
+        cmsg.type      = CMSG_BLKIF_FE;
+        cmsg.subtype   = CMSG_BLKIF_FE_INTERFACE_CONNECT;
+        cmsg.length    = sizeof(blkif_fe_interface_connect_t);
+        up.handle      = 0;
+        up.shmem_frame = virt_to_machine(blk_ring) >> PAGE_SHIFT;
+        memcpy(cmsg.msg, &up, sizeof(up));
+        
+        /* Tell the controller to bring up the interface. */
+        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+        break;
+
+    case BLKIF_INTERFACE_STATUS_CONNECTED:
+        if ( blkif_state == BLKIF_STATE_CLOSED )
+        {
+            printk(KERN_WARNING "Unexpected blkif-CONNECTED message"
+                   " in state %d\n", blkif_state);
+            break;
+        }
+
+        blkif_evtchn = status->evtchn;
+        blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
+        (void)request_irq(blkif_irq, blkif_int, 0, "blkif", NULL);
+
+        if ( recovery )
+        {
+            int i;
+
+           /* Shouldn't need the blkif_io_lock here - the device is
+            * plugged and the recovery flag prevents the interrupt handler
+            * changing anything. */
+
+            /* Reissue requests from the private block ring. */
+            for ( i = 0;
+                 resp_cons_rec < blk_ring_rec->req_prod;
+                  resp_cons_rec++, i++ )
+            {
+                blk_ring->ring[i].req
+                    = blk_ring_rec->ring[MASK_BLKIF_IDX(resp_cons_rec)].req;
+            }
+
+            /* Reset the private block ring to match the new ring. */
+            memcpy(blk_ring_rec, blk_ring, sizeof(*blk_ring));
+            resp_cons_rec = 0;
+
+            /* blk_ring->req_prod will be set when we flush_requests().*/
+            blk_ring_rec->req_prod = req_prod = i;
+
+            wmb();
+
+            /* Switch off recovery mode, using a memory barrier to ensure that
+             * it's seen before we flush requests - we don't want to miss any
+             * interrupts. */
+            recovery = 0;
+            wmb();
+
+            /* Kicks things back into life. */
+            flush_requests();
+        }
+        else
+        {
+            /* Probe for discs that are attached to the interface. */
+            xlvbd_init();
+        }
+
+        blkif_state = BLKIF_STATE_CONNECTED;
+        
+        /* Kick pending requests. */
+        spin_lock_irq(&blkif_io_lock);
+        kick_pending_request_queues();
+        spin_unlock_irq(&blkif_io_lock);
+
+        break;
+
+    default:
+        printk(KERN_WARNING "Status change to unknown value %d\n", 
+               status->status);
+        break;
+    }
+}
+
+
+static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    switch ( msg->subtype )
+    {
+    case CMSG_BLKIF_FE_INTERFACE_STATUS_CHANGED:
+        if ( msg->length != sizeof(blkif_fe_interface_status_changed_t) )
+            goto parse_error;
+        blkif_status_change((blkif_fe_interface_status_changed_t *)
+                            &msg->msg[0]);
+        break;        
+#if 0
+    case CMSG_BLKIF_FE_VBD_STATUS_CHANGED:
+        update_tq.routine = update_vbds_task;
+        schedule_task(&update_tq);
+        break;
+#endif
+    default:
+        goto parse_error;
+    }
+
+    ctrl_if_send_response(msg);
+    return;
+
+ parse_error:
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+
+int __init xlblk_init(void)
+{
+    ctrl_msg_t                       cmsg;
+    blkif_fe_driver_status_changed_t st;
+
+    if ( (start_info.flags & SIF_INITDOMAIN) 
+        || (start_info.flags & SIF_BLK_BE_DOMAIN) )
+        return 0;
+
+    printk(KERN_INFO "Initialising Xen virtual block device\n");
+
+    blk_ring_rec = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
+    memset(blk_ring_rec, 0, sizeof(*blk_ring_rec));
+
+    (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+
+    /* Send a driver-UP notification to the domain controller. */
+    cmsg.type      = CMSG_BLKIF_FE;
+    cmsg.subtype   = CMSG_BLKIF_FE_DRIVER_STATUS_CHANGED;
+    cmsg.length    = sizeof(blkif_fe_driver_status_changed_t);
+    st.status      = BLKIF_DRIVER_STATUS_UP;
+    memcpy(cmsg.msg, &st, sizeof(st));
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+
+    /*
+     * We should read 'nr_interfaces' from the response message and wait
+     * for notifications before proceeding. For now we assume that we
+     * will be notified of exactly one interface.
+     */
+    while ( blkif_state != BLKIF_STATE_CONNECTED )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    return 0;
+#if 0
+       int error; 
+
+       reset_xlblk_interface();
+
+       xlblk_response_irq = bind_virq_to_irq(VIRQ_BLKDEV);
+       xlblk_update_irq   = bind_virq_to_irq(VIRQ_VBD_UPD);
+
+       error = request_irq(xlblk_response_irq, xlblk_response_int, 
+                           SA_SAMPLE_RANDOM, "blkdev", NULL);
+       if (error) {
+               printk(KERN_ALERT "Could not allocate receive interrupt\n");
+               goto fail;
+       }
+
+       error = request_irq(xlblk_update_irq, xlblk_update_int,
+                           0, "blkdev", NULL);
+       if (error) {
+               printk(KERN_ALERT
+                      "Could not allocate block update interrupt\n");
+               goto fail;
+       }
+
+       (void)xlvbd_init();
+
+       return 0;
+
+ fail:
+       return error;
+#endif
+}
+
+
+static void __exit xlblk_cleanup(void)
+{
+    /* XXX FIXME */
+    BUG();
+#if 0
+       /*  xlvbd_cleanup(); */
+       free_irq(xlblk_response_irq, NULL);
+       free_irq(xlblk_update_irq, NULL);
+       unbind_virq_from_irq(VIRQ_BLKDEV);
+       unbind_virq_from_irq(VIRQ_VBD_UPD);
+#endif
+}
+
+
+module_init(xlblk_init);
+module_exit(xlblk_cleanup);
+
+
+void blkdev_suspend(void)
+{
+}
+
+
+void blkdev_resume(void)
+{
+}
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/block/block.h b/linux-2.6.7-xen-sparse/drivers/xen/block/block.h
new file mode 100644 (file)
index 0000000..77002dd
--- /dev/null
@@ -0,0 +1,92 @@
+/******************************************************************************
+ * block.h
+ * 
+ * Shared definitions between all levels of XenLinux Virtual block devices.
+ */
+
+#ifndef __XEN_DRIVERS_BLOCK_H__
+#define __XEN_DRIVERS_BLOCK_H__
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/major.h>
+
+#include <linux/devfs_fs_kernel.h>
+
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include <asm-xen/blkif.h>
+
+#if 0
+#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+#if 0
+#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+#else
+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
+#endif
+
+struct xlbd_type_info {
+       int partn_shift;
+       int devs_per_major;
+       int hardsect_size;
+       int max_sectors;
+       char *name;
+};
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'.  They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+struct xlbd_major_info {
+       int major;
+       int usage;
+       int xd_device;
+       struct xlbd_type_info *type;
+};
+
+struct xlbd_disk_info {
+       int xd_device;
+       struct xlbd_major_info *mi;
+};
+
+typedef struct xen_block {
+       int usage;
+} xen_block_t;
+
+extern struct request_queue *xlbd_blk_queue;
+extern spinlock_t blkif_io_lock;
+
+extern int blkif_open(struct inode *inode, struct file *filep);
+extern int blkif_release(struct inode *inode, struct file *filep);
+extern int blkif_ioctl(struct inode *inode, struct file *filep,
+                           unsigned command, unsigned long argument);
+extern int blkif_check(dev_t dev);
+extern int blkif_revalidate(dev_t dev);
+extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
+extern void do_blkif_request (request_queue_t *rq); 
+
+extern void xlvbd_update_vbds(void);
+
+/* Virtual block-device subsystem. */
+extern int  xlvbd_init(void);
+extern void xlvbd_cleanup(void); 
+
+#endif /* __XEN_DRIVERS_BLOCK_H__ */
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/block/vbd.c b/linux-2.6.7-xen-sparse/drivers/xen/block/vbd.c
new file mode 100644 (file)
index 0000000..e7c5453
--- /dev/null
@@ -0,0 +1,530 @@
+/******************************************************************************
+ * vbd.c
+ * 
+ * XenLinux virtual block-device driver (xvd).
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ */
+
+#include "block.h"
+#include <linux/blkdev.h>
+
+/*
+ * For convenience we distinguish between ide, scsi and 'other' (i.e.
+ * potentially combinations of the two) in the naming scheme and in a few 
+ * other places (like default readahead, etc).
+ */
+
+#define NUM_IDE_MAJORS 10
+#define NUM_SCSI_MAJORS 9
+#define NUM_VBD_MAJORS 1
+
+static struct xlbd_type_info xlbd_ide_type = {
+       .partn_shift = 6,
+       // XXXcl todo blksize_size[major]  = 1024;
+       .hardsect_size = 512,
+       .max_sectors = 128,  /* 'hwif->rqsize' if we knew it */
+       // XXXcl todo read_ahead[major]    = 8; /* from drivers/ide/ide-probe.c */
+       .name = "hd",
+};
+
+static struct xlbd_type_info xlbd_scsi_type = {
+       .partn_shift = 4,
+       // XXXcl todo blksize_size[major]  = 1024; /* XXX 512; */
+       .hardsect_size = 512,
+       .max_sectors = 128*8, /* XXX 128; */
+       // XXXcl todo read_ahead[major]    = 0; /* XXX 8; -- guessing */
+       .name = "sd",
+};
+
+static struct xlbd_type_info xlbd_vbd_type = {
+       .partn_shift = 4,
+       // XXXcl todo blksize_size[major]  = 512;
+       .hardsect_size = 512,
+       .max_sectors = 128,
+       // XXXcl todo read_ahead[major]    = 8;
+       .name = "xvd",
+};
+
+/* XXXcl handle cciss after finding out why it's "hacked" in */
+
+static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
+                                        NUM_VBD_MAJORS];
+
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+static int nr_vbds;
+static vdisk_t *vbd_info;
+
+struct request_queue *xlbd_blk_queue = NULL;
+
+#define MAJOR_XEN(dev) ((dev)>>8)
+#define MINOR_XEN(dev) ((dev) & 0xff)
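+/*
+ * Worked example (assuming the standard Linux major numbers): xd_device
+ * 0x0301 encodes major 3 (IDE0_MAJOR) and minor 1 -- i.e. /dev/hda1.
+ */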
+
+static struct block_device_operations xlvbd_block_fops = 
+{
+       .owner          = THIS_MODULE,
+       .open           = blkif_open,
+       .release        = blkif_release,
+       .ioctl          = blkif_ioctl,
+#if 0
+    check_media_change: blkif_check,
+    revalidate:         blkif_revalidate,
+#endif
+};
+
+spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
+
+static int xlvbd_get_vbd_info(vdisk_t *disk_info)
+{
+    vdisk_t         *buf = (vdisk_t *)__get_free_page(GFP_KERNEL);
+    blkif_request_t  req;
+    blkif_response_t rsp;
+    int              nr;
+
+    if ( buf == NULL )
+        return -1;
+
+    memset(&req, 0, sizeof(req));
+    req.operation   = BLKIF_OP_PROBE;
+    req.nr_segments = 1;
+    req.frame_and_sects[0] = virt_to_machine(buf) | 7;
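+    /*
+     * The "| 7" sets the low bits of frame_and_sects: presumably first
+     * sector 0, last sector 7, i.e. the whole page-sized buffer
+     * (8 x 512-byte sectors).
+     */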
+
+    blkif_control_send(&req, &rsp);
+
+    if ( rsp.status <= 0 )
+    {
+        printk(KERN_ALERT "Could not probe disks (%d)\n", rsp.status);
+        return -1;
+    }
+
+    if ( (nr = rsp.status) > MAX_VBDS )
+         nr = MAX_VBDS;
+    memcpy(disk_info, buf, nr * sizeof(vdisk_t));
+
+    free_page((unsigned long)buf);
+    return nr;
+}
+
+static struct xlbd_major_info *xlbd_get_major_info(int xd_device, int *minor)
+{
+       int mi_idx, new_major;
+       int xd_major = MAJOR_XEN(xd_device); 
+       int xd_minor = MINOR_XEN(xd_device);
+
+       *minor = xd_minor;
+
+       switch (xd_major) {
+       case IDE0_MAJOR: mi_idx = 0; new_major = IDE0_MAJOR; break;
+       case IDE1_MAJOR: mi_idx = 1; new_major = IDE1_MAJOR; break;
+       case IDE2_MAJOR: mi_idx = 2; new_major = IDE2_MAJOR; break;
+       case IDE3_MAJOR: mi_idx = 3; new_major = IDE3_MAJOR; break;
+       case IDE4_MAJOR: mi_idx = 4; new_major = IDE4_MAJOR; break;
+       case IDE5_MAJOR: mi_idx = 5; new_major = IDE5_MAJOR; break;
+       case IDE6_MAJOR: mi_idx = 6; new_major = IDE6_MAJOR; break;
+       case IDE7_MAJOR: mi_idx = 7; new_major = IDE7_MAJOR; break;
+       case IDE8_MAJOR: mi_idx = 8; new_major = IDE8_MAJOR; break;
+       case IDE9_MAJOR: mi_idx = 9; new_major = IDE9_MAJOR; break;
+       case SCSI_DISK0_MAJOR: mi_idx = 10; new_major = SCSI_DISK0_MAJOR; break;
+       case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
+               mi_idx = 11 + xd_major - SCSI_DISK1_MAJOR;
+               new_major = xd_major;
+               break;
+       case SCSI_CDROM_MAJOR: mi_idx = 18; new_major = SCSI_CDROM_MAJOR; break;
+       default: mi_idx = 19; new_major = 0;/* XXXcl notyet */ break;
+       }
+
+       if (major_info[mi_idx])
+               return major_info[mi_idx];
+
+       major_info[mi_idx] = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
+       if (major_info[mi_idx] == NULL)
+               return NULL;
+
+       memset(major_info[mi_idx], 0, sizeof(struct xlbd_major_info));
+
+       switch (mi_idx) {
+       case 0 ... (NUM_IDE_MAJORS - 1):
+               major_info[mi_idx]->type = &xlbd_ide_type;
+               break;
+       case NUM_IDE_MAJORS ... (NUM_IDE_MAJORS + NUM_SCSI_MAJORS - 1):
+               major_info[mi_idx]->type = &xlbd_scsi_type;
+               break;
+       case (NUM_IDE_MAJORS + NUM_SCSI_MAJORS) ...
+               (NUM_IDE_MAJORS + NUM_SCSI_MAJORS + NUM_VBD_MAJORS - 1):
+               major_info[mi_idx]->type = &xlbd_vbd_type;
+               break;
+       }
+       major_info[mi_idx]->major = new_major;
+
+       if (register_blkdev(major_info[mi_idx]->major, major_info[mi_idx]->type->name)) {
+               printk(KERN_ALERT "XL VBD: can't get major %d with name %s\n",
+                   major_info[mi_idx]->major, major_info[mi_idx]->type->name);
+               goto out;
+       }
+
+       devfs_mk_dir(major_info[mi_idx]->type->name);
+
+       return major_info[mi_idx];
+
+ out:
+       kfree(major_info[mi_idx]);
+       major_info[mi_idx] = NULL;
+       return NULL;
+}
+
+static struct gendisk *xlvbd_get_gendisk(struct xlbd_major_info *mi,
+                                        int xd_minor, vdisk_t *xd)
+{
+       struct gendisk *gd;
+       struct xlbd_disk_info *di;
+       int device, partno;
+
+       device = MKDEV(mi->major, xd_minor);
+       gd = get_gendisk(device, &partno);
+       if (gd)
+               return gd;
+
+       di = kmalloc(sizeof(struct xlbd_disk_info), GFP_KERNEL);
+       if (di == NULL)
+               return NULL;
+       di->mi = mi;
+       di->xd_device = xd->device;
+
+       /* Construct an appropriate gendisk structure. */
+       gd = alloc_disk(1);
+       if (gd == NULL)
+               goto out;
+
+       gd->major = mi->major;
+       gd->first_minor = xd_minor;
+       gd->fops = &xlvbd_block_fops;
+       gd->private_data = di;
+       sprintf(gd->disk_name, "%s%c%d", mi->type->name,
+           'a' + (xd_minor >> mi->type->partn_shift),
+           xd_minor & ((1 << mi->type->partn_shift) - 1));
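+       /*
+        * e.g. an IDE unit (partn_shift 6) with minor 1 is named "hda1";
+        * minor 65 maps to "hdb1".
+        */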
+       /*  sprintf(gd->devfs_name, "%s%s/disc%d", mi->type->name, , ); XXXdevfs */
+
+       set_capacity(gd, xd->capacity);
+
+       if (xlbd_blk_queue == NULL) {
+               xlbd_blk_queue = blk_init_queue(do_blkif_request,
+                                               &blkif_io_lock);
+               if (xlbd_blk_queue == NULL)
+                       goto out;
+               elevator_init(xlbd_blk_queue, &elevator_noop);
+
+               /*
+                * Turn off barking 'headactive' mode. We dequeue
+                * buffer heads as soon as we pass them to back-end
+                * driver.
+                */
+               blk_queue_headactive(xlbd_blk_queue, 0); /* XXXcl: noop according to blkdev.h */
+
+               blk_queue_hardsect_size(xlbd_blk_queue,
+                                       mi->type->hardsect_size);
+               blk_queue_max_sectors(xlbd_blk_queue, mi->type->max_sectors); /* 'hwif->rqsize' if we knew it */
+
+               /* XXXcl: set mask to PAGE_SIZE for now, to improve either use 
+                  - blk_queue_merge_bvec to merge requests with adjacent ma's
+                  - the tags infrastructure
+                  - the dma infrastructure
+               */
+               blk_queue_segment_boundary(xlbd_blk_queue, PAGE_SIZE - 1);
+
+               blk_queue_max_phys_segments(xlbd_blk_queue,
+                    BLKIF_MAX_SEGMENTS_PER_REQUEST);
+               blk_queue_max_hw_segments(xlbd_blk_queue,
+                    BLKIF_MAX_SEGMENTS_PER_REQUEST); /* XXXcl not needed? */
+
+       }
+       gd->queue = xlbd_blk_queue;
+
+       add_disk(gd);
+
+       return gd;
+
+ out:
+       if (gd)
+               del_gendisk(gd);
+       kfree(di);
+       return NULL;
+}
+
+/*
+ * xlvbd_init_device - initialise a VBD device
+ * @xd:                a vdisk_t describing the VBD
+ *
+ * Takes a vdisk_t * that describes a VBD the domain has access to.
+ * Performs appropriate initialisation and registration of the device.
+ *
+ * Care needs to be taken when making re-entrant calls to ensure that
+ * corruption does not occur.  Also, devices that are in use should not have
+ * their details updated.  This is the caller's responsibility.
+ */
+static int xlvbd_init_device(vdisk_t *xd)
+{
+       struct block_device *bd;
+       struct gendisk *gd;
+       struct xlbd_major_info *mi;
+       int device;
+       int minor;
+
+       int err = -ENOMEM;
+
+       mi = xlbd_get_major_info(xd->device, &minor);
+       if (mi == NULL)
+               return -EPERM;
+
+       device = MKDEV(mi->major, minor);
+
+       if ((bd = bdget(device)) == NULL)
+               return -EPERM;
+
+       /*
+        * Update of partition info, and check of usage count, is protected
+        * by the per-block-device semaphore.
+        */
+       down(&bd->bd_sem);
+
+       gd = xlvbd_get_gendisk(mi, minor, xd);
+       if (gd == NULL) {
+               err = -EPERM;
+               goto out;
+       }
+
+       if (VDISK_READONLY(xd->info))
+               set_disk_ro(gd, 1); 
+
+       /* Some final fix-ups depending on the device type */
+       switch (VDISK_TYPE(xd->info)) { 
+       case VDISK_TYPE_CDROM:
+               gd->flags |= GENHD_FL_REMOVABLE | GENHD_FL_CD; 
+               /* FALLTHROUGH */
+       case VDISK_TYPE_FLOPPY: 
+       case VDISK_TYPE_TAPE:
+               gd->flags |= GENHD_FL_REMOVABLE; 
+               break; 
+
+       case VDISK_TYPE_DISK:
+               break; 
+
+       default:
+               printk(KERN_ALERT "XenLinux: unknown device type %d\n", 
+                   VDISK_TYPE(xd->info)); 
+               break; 
+       }
+
+       err = 0;
+ out:
+       up(&bd->bd_sem);
+       bdput(bd);    
+       return err;
+}
+
+#if 0
+/*
+ * xlvbd_remove_device - remove a device node if possible
+ * @device:       numeric device ID
+ *
+ * Updates the gendisk structure and invalidates devices.
+ *
+ * This is OK for now but in future, should perhaps consider where this should
+ * deallocate gendisks / unregister devices.
+ */
+static int xlvbd_remove_device(int device)
+{
+    int i, rc = 0, minor = MINOR(device);
+    struct gendisk *gd;
+    struct block_device *bd;
+    xen_block_t *disk = NULL;
+
+    if ( (bd = bdget(device)) == NULL )
+        return -1;
+
+    /*
+     * Update of partition info, and check of usage count, is protected
+     * by the per-block-device semaphore.
+     */
+    down(&bd->bd_sem);
+
+    if ( ((gd = get_gendisk(device)) == NULL) ||
+         ((disk = xldev_to_xldisk(device)) == NULL) )
+        BUG();
+
+    if ( disk->usage != 0 )
+    {
+        printk(KERN_ALERT "VBD removal failed - in use [dev=%x]\n", device);
+        rc = -1;
+        goto out;
+    }
+    if ( (minor & (gd->max_p-1)) != 0 )
+    {
+        /* 1: The VBD is mapped to a partition rather than a whole unit. */
+        invalidate_device(device, 1);
+        gd->part[minor].start_sect = 0;
+        gd->part[minor].nr_sects   = 0;
+        gd->sizes[minor]           = 0;
+
+        /* Clear the consists-of-virtual-partitions flag if possible. */
+        gd->flags[minor >> gd->minor_shift] &= ~GENHD_FL_VIRT_PARTNS;
+        for ( i = 1; i < gd->max_p; i++ )
+            if ( gd->sizes[(minor & ~(gd->max_p-1)) + i] != 0 )
+                gd->flags[minor >> gd->minor_shift] |= GENHD_FL_VIRT_PARTNS;
+
+        /*
+         * If all virtual partitions are now gone, and a 'whole unit' VBD is
+         * present, then we can try to grok the unit's real partition table.
+         */
+        if ( !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) &&
+             (gd->sizes[minor & ~(gd->max_p-1)] != 0) &&
+             !(gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE) )
+        {
+            register_disk(gd,
+                          device&~(gd->max_p-1), 
+                          gd->max_p, 
+                          &xlvbd_block_fops,
+                          gd->part[minor&~(gd->max_p-1)].nr_sects);
+        }
+    }
+    else
+    {
+        /*
+         * 2: The VBD is mapped to an entire 'unit'. Clear all partitions.
+         * NB. The partition entries are only cleared if there are no VBDs
+         * mapped to individual partitions on this unit.
+         */
+        i = gd->max_p - 1; /* Default: clear subpartitions as well. */
+        if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS )
+            i = 0; /* 'Virtual' mode: only clear the 'whole unit' entry. */
+        while ( i >= 0 )
+        {
+            invalidate_device(device+i, 1);
+            gd->part[minor+i].start_sect = 0;
+            gd->part[minor+i].nr_sects   = 0;
+            gd->sizes[minor+i]           = 0;
+            i--;
+        }
+    }
+
+ out:
+    up(&bd->bd_sem);
+    bdput(bd);
+    return rc;
+}
+
+/*
+ * xlvbd_update_vbds - reprobes the VBD status and updates driver state
+ * accordingly. The VBDs need to be updated in this way when the domain is
+ * initialised and also each time we receive an XLBLK_UPDATE event.
+ */
+void xlvbd_update_vbds(void)
+{
+    int i, j, k, old_nr, new_nr;
+    vdisk_t *old_info, *new_info, *merged_info;
+
+    old_info = vbd_info;
+    old_nr   = nr_vbds;
+
+    new_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
+    if ( unlikely((new_nr = xlvbd_get_vbd_info(new_info)) < 0) )
+    {
+        kfree(new_info);
+        return;
+    }
+
+    /*
+     * Final list maximum size is old list + new list. This occurs only when
+     * old list and new list do not overlap at all, and we cannot yet destroy
+     * VBDs in the old list because the usage counts are busy.
+     */
+    merged_info = kmalloc((old_nr + new_nr) * sizeof(vdisk_t), GFP_KERNEL);
+
+    /* @i tracks old list; @j tracks new list; @k tracks merged list. */
+    i = j = k = 0;
+
+    while ( (i < old_nr) && (j < new_nr) )
+    {
+        if ( old_info[i].device < new_info[j].device )
+        {
+            if ( xlvbd_remove_device(old_info[i].device) != 0 )
+                memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+            i++;
+        }
+        else if ( old_info[i].device > new_info[j].device )
+        {
+            if ( xlvbd_init_device(&new_info[j]) == 0 )
+                memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+            j++;
+        }
+        else
+        {
+            if ( ((old_info[i].capacity == new_info[j].capacity) &&
+                  (old_info[i].info == new_info[j].info)) ||
+                 (xlvbd_remove_device(old_info[i].device) != 0) )
+                memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+            else if ( xlvbd_init_device(&new_info[j]) == 0 )
+                memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+            i++; j++;
+        }
+    }
+
+    for ( ; i < old_nr; i++ )
+    {
+        if ( xlvbd_remove_device(old_info[i].device) != 0 )
+            memcpy(&merged_info[k++], &old_info[i], sizeof(vdisk_t));
+    }
+
+    for ( ; j < new_nr; j++ )
+    {
+        if ( xlvbd_init_device(&new_info[j]) == 0 )
+            memcpy(&merged_info[k++], &new_info[j], sizeof(vdisk_t));
+    }
+
+    vbd_info = merged_info;
+    nr_vbds  = k;
+
+    kfree(old_info);
+    kfree(new_info);
+}
+#endif
+
+/*
+ * Set up all the linux device goop for the virtual block devices
+ * (vbd's) that we know about. Note that although from the backend
+ * driver's p.o.v. VBDs are addressed simply an opaque 16-bit device
+ * number, the domain creation tools conventionally allocate these
+ * numbers to correspond to those used by 'real' linux -- this is just
+ * for convenience as it means e.g. that the same /etc/fstab can be
+ * used when booting with or without Xen.
+ */
+int xlvbd_init(void)
+{
+       int i;
+
+       /*
+        * If compiled as a module, we don't support unloading yet. We
+        * therefore permanently increment the reference count to
+        * disallow it.
+        */
+       MOD_INC_USE_COUNT;
+
+       memset(major_info, 0, sizeof(major_info));
+
+       vbd_info = kmalloc(MAX_VBDS * sizeof(vdisk_t), GFP_KERNEL);
+       nr_vbds  = (vbd_info != NULL) ? xlvbd_get_vbd_info(vbd_info) : -1;
+
+       if (nr_vbds < 0) {
+               kfree(vbd_info);
+               vbd_info = NULL;
+               nr_vbds  = 0;
+       } else {
+               for (i = 0; i < nr_vbds; i++)
+                       xlvbd_init_device(&vbd_info[i]);
+       }
+
+       return 0;
+}
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/console/Makefile b/linux-2.6.7-xen-sparse/drivers/xen/console/Makefile
new file mode 100644 (file)
index 0000000..e4ffdef
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := console.o
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/console/console.c b/linux-2.6.7-xen-sparse/drivers/xen/console/console.c
new file mode 100644 (file)
index 0000000..40bb720
--- /dev/null
@@ -0,0 +1,618 @@
+/******************************************************************************
+ * console.c
+ * 
+ * Virtual console driver.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <asm-xen/evtchn.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/hypervisor.h>
+#include <asm/hypervisor-ifs/event_channel.h>
+#include <asm-xen/ctrl_if.h>
+
+/*
+ * Modes:
+ *  'xencons=off'  [XC_OFF]:     Console is disabled.
+ *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
+ *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
+ *                 [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
+ * 
+ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
+ * warnings from standard distro startup scripts.
+ */
+static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
+
+static int __init xencons_setup(char *str)
+{
+    if ( !strcmp(str, "tty") )
+        xc_mode = XC_TTY;
+    else if ( !strcmp(str, "ttyS") )
+        xc_mode = XC_SERIAL;
+    else if ( !strcmp(str, "off") )
+        xc_mode = XC_OFF;
+    return 1;
+}
+__setup("xencons", xencons_setup);
+
+/* The kernel and user-land drivers share a common transmit buffer. */
+#define WBUF_SIZE     4096
+#define WBUF_MASK(_i) ((_i)&(WBUF_SIZE-1))
+static char wbuf[WBUF_SIZE];
+static unsigned int wc, wp; /* write_cons, write_prod */
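+/*
+ * NB. wc and wp are free-running indices: buffered bytes = (wp - wc),
+ * free space = WBUF_SIZE - (wp - wc), and WBUF_MASK() yields the array
+ * offset (this relies on WBUF_SIZE being a power of two).
+ */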
+
+/* This lock protects accesses to the common transmit buffer. */
+static spinlock_t xencons_lock = SPIN_LOCK_UNLOCKED;
+
+static struct tty_driver *xencons_driver;
+
+#define NUM_XENCONS 1
+
+/* Common transmit-kick routine. */
+static void __xencons_tx_flush(void);
+
+/* This task is used to defer sending console data until there is space. */
+static void xencons_tx_flush_task_routine(void *data);
+#if 0                          /* XXXcl tq */
+static struct tq_struct xencons_tx_flush_task = {
+    routine: xencons_tx_flush_task_routine
+};
+#else
+static DECLARE_WORK(xencons_tx_flush_task, xencons_tx_flush_task_routine,
+                    NULL);
+#endif
+
+
+/******************** Kernel console driver ********************************/
+
+static void kcons_write(
+    struct console *c, const char *s, unsigned int count)
+{
+    int           i;
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    
+    for ( i = 0; i < count; i++ )
+    {
+        if ( (wp - wc) >= (WBUF_SIZE - 1) )
+            break;
+        if ( (wbuf[WBUF_MASK(wp++)] = s[i]) == '\n' )
+            wbuf[WBUF_MASK(wp++)] = '\r';
+    }
+
+    __xencons_tx_flush();
+
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void kcons_write_dom0(
+    struct console *c, const char *s, unsigned int count)
+{
+    int rc;
+
+    while ( count > 0 )
+    {
+        if ( (rc = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s)) > 0 )
+        {
+            count -= rc;
+            s += rc;
+        }
+        else
+            break;
+    }
+}
+
+static struct tty_driver *kcons_device(struct console *c, int *index)
+{
+    *index = c->index;
+    return xencons_driver;
+}
+
+static struct console kcons_info = {
+    device:  kcons_device,
+    flags:   CON_PRINTBUFFER,
+    index:   -1
+};
+
+static int __init xen_console_init(void)
+{
+    if ( start_info.flags & SIF_INITDOMAIN )
+    {
+        if ( xc_mode == XC_DEFAULT )
+            xc_mode = XC_SERIAL;
+        kcons_info.write = kcons_write_dom0;
+    }
+    else
+    {
+        if ( xc_mode == XC_DEFAULT )
+            xc_mode = XC_TTY;
+        kcons_info.write = kcons_write;
+    }
+
+    if ( xc_mode == XC_OFF )
+        return 0;
+
+    if ( xc_mode == XC_SERIAL )
+        strcpy(kcons_info.name, "ttyS");
+    else
+        strcpy(kcons_info.name, "tty");
+
+    register_console(&kcons_info);
+    return 0;
+}
+console_initcall(xen_console_init);
+
+
+/*** Useful function for console debugging -- goes straight to Xen. ***/
+asmlinkage int xprintk(const char *fmt, ...)
+{
+    va_list args;
+    int printk_len;
+    static char printk_buf[1024];
+    
+    /* Emit the output into the temporary buffer */
+    va_start(args, fmt);
+    printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+    va_end(args);
+
+    /* Send the processed output directly to Xen. */
+    kcons_write_dom0(NULL, printk_buf, printk_len);
+
+    return 0;
+}
+
+/*** Forcibly flush console data before dying. ***/
+void xencons_force_flush(void)
+{
+    ctrl_msg_t msg;
+    int        sz;
+
+    /* Emergency console is synchronous, so there's nothing to flush. */
+    if ( start_info.flags & SIF_INITDOMAIN )
+        return;
+
+    /*
+     * We use dangerous control-interface functions that require a quiescent
+     * system and no interrupts. Try to ensure this with a global cli().
+     */
+    cli();
+
+    /* Spin until console data is flushed through to the domain controller. */
+    while ( (wc != wp) && !ctrl_if_transmitter_empty() )
+    {
+        /* Interrupts are disabled -- we must manually reap responses. */
+        ctrl_if_discard_responses();
+
+        if ( (sz = wp - wc) == 0 )
+            continue;
+        if ( sz > sizeof(msg.msg) )
+            sz = sizeof(msg.msg);
+        if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
+            sz = WBUF_SIZE - WBUF_MASK(wc);
+
+        msg.type    = CMSG_CONSOLE;
+        msg.subtype = CMSG_CONSOLE_DATA;
+        msg.length  = sz;
+        memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
+            
+        if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+            wc += sz;
+    }
+}
+
+
+/******************** User-space console driver (/dev/console) ************/
+
+static struct termios *xencons_termios[MAX_NR_CONSOLES];
+static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
+static struct tty_struct *xencons_tty;
+static int xencons_priv_irq;
+static char x_char;
+
+/* Non-privileged receive callback. */
+static void xencons_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    int           i;
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    if ( xencons_tty != NULL )
+    {
+        for ( i = 0; i < msg->length; i++ )
+            tty_insert_flip_char(xencons_tty, msg->msg[i], 0);
+        tty_flip_buffer_push(xencons_tty);
+    }
+    spin_unlock_irqrestore(&xencons_lock, flags);
+
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+/* Privileged and non-privileged transmit worker. */
+static void __xencons_tx_flush(void)
+{
+    int        sz, work_done = 0;
+    ctrl_msg_t msg;
+
+    if ( start_info.flags & SIF_INITDOMAIN )
+    {
+        if ( x_char )
+        {
+            kcons_write_dom0(NULL, &x_char, 1);
+            x_char = 0;
+            work_done = 1;
+        }
+
+        while ( wc != wp )
+        {
+            sz = wp - wc;
+            if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
+                sz = WBUF_SIZE - WBUF_MASK(wc);
+            kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
+            wc += sz;
+            work_done = 1;
+        }
+    }
+    else
+    {
+        while ( x_char )
+        {
+            msg.type    = CMSG_CONSOLE;
+            msg.subtype = CMSG_CONSOLE_DATA;
+            msg.length  = 1;
+            msg.msg[0]  = x_char;
+
+            if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+                x_char = 0;
+            else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) ) /* XXXcl tq */
+                break;
+
+            work_done = 1;
+        }
+
+        while ( wc != wp )
+        {
+            sz = wp - wc;
+            if ( sz > sizeof(msg.msg) )
+                sz = sizeof(msg.msg);
+            if ( sz > (WBUF_SIZE - WBUF_MASK(wc)) )
+                sz = WBUF_SIZE - WBUF_MASK(wc);
+
+            msg.type    = CMSG_CONSOLE;
+            msg.subtype = CMSG_CONSOLE_DATA;
+            msg.length  = sz;
+            memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
+            
+            if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
+                wc += sz;
+            else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) ) /* XXXcl tq */
+                break;
+
+            work_done = 1;
+        }
+    }
+
+    if ( work_done && (xencons_tty != NULL) )
+    {
+        wake_up_interruptible(&xencons_tty->write_wait);
+        if ( (xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+             (xencons_tty->ldisc.write_wakeup != NULL) )
+            (xencons_tty->ldisc.write_wakeup)(xencons_tty);
+    }
+}
+
+/* Non-privileged transmit kicker. */
+static void xencons_tx_flush_task_routine(void *data)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&xencons_lock, flags);
+    __xencons_tx_flush();
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+/* Privileged receive callback and transmit kicker. */
+static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+    static char   rbuf[16];
+    int           i, l;
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+
+    if ( xencons_tty != NULL )
+    {
+        /* Receive work. */
+        while ( (l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0 )
+            for ( i = 0; i < l; i++ )
+                tty_insert_flip_char(xencons_tty, rbuf[i], 0);
+        if ( xencons_tty->flip.count != 0 )
+            tty_flip_buffer_push(xencons_tty);
+    }
+
+    /* Transmit work. */
+    __xencons_tx_flush();
+
+    spin_unlock_irqrestore(&xencons_lock, flags);
+
+    return IRQ_HANDLED;
+}
+
+static int xencons_write_room(struct tty_struct *tty)
+{
+    return WBUF_SIZE - (wp - wc);
+}
+
+static int xencons_chars_in_buffer(struct tty_struct *tty)
+{
+    return wp - wc;
+}
+
+static void xencons_send_xchar(struct tty_struct *tty, char ch)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    x_char = ch;
+    __xencons_tx_flush();
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void xencons_throttle(struct tty_struct *tty)
+{
+    if ( I_IXOFF(tty) )
+        xencons_send_xchar(tty, STOP_CHAR(tty));
+}
+
+static void xencons_unthrottle(struct tty_struct *tty)
+{
+    if ( I_IXOFF(tty) )
+    {
+        if ( x_char != 0 )
+            x_char = 0;
+        else
+            xencons_send_xchar(tty, START_CHAR(tty));
+    }
+}
+
+static void xencons_flush_buffer(struct tty_struct *tty)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    wc = wp = 0;
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static inline int __xencons_put_char(int ch)
+{
+    char _ch = (char)ch;
+    if ( (wp - wc) == WBUF_SIZE )
+        return 0;
+    wbuf[WBUF_MASK(wp++)] = _ch;
+    return 1;
+}
+
+static int xencons_write(struct tty_struct *tty, int from_user,
+                       const u_char * buf, int count)
+{
+    int i;
+    unsigned long flags;
+
+    if ( from_user && verify_area(VERIFY_READ, buf, count) )
+        return -EINVAL;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+
+    for ( i = 0; i < count; i++ )
+    {
+        char ch;
+        if ( from_user )
+            __get_user(ch, buf + i);
+        else
+            ch = buf[i];
+        if ( !__xencons_put_char(ch) )
+            break;
+    }
+
+    if ( i != 0 )
+        __xencons_tx_flush();
+
+    spin_unlock_irqrestore(&xencons_lock, flags);
+
+    return i;
+}
+
+static void xencons_put_char(struct tty_struct *tty, u_char ch)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    (void)__xencons_put_char(ch);
+    spin_unlock_irqrestore(&xencons_lock, flags);
+}
+
+static void xencons_flush_chars(struct tty_struct *tty)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    __xencons_tx_flush();
+    spin_unlock_irqrestore(&xencons_lock, flags);    
+}
+
+static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+    unsigned long orig_jiffies = jiffies;
+
+    while ( tty->driver->chars_in_buffer(tty) )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+        if ( signal_pending(current) )
+            break;
+        if ( (timeout != 0) && time_after(jiffies, orig_jiffies + timeout) )
+            break;
+    }
+    
+    set_current_state(TASK_RUNNING);
+}
+
+static int xencons_open(struct tty_struct *tty, struct file *filp)
+{
+    int line;
+    unsigned long flags;
+
+    MOD_INC_USE_COUNT;
+    line = tty->index;
+    if ( line < 0 || line >= NUM_XENCONS )
+    {
+        MOD_DEC_USE_COUNT;
+        return -ENODEV;
+    }
+
+    spin_lock_irqsave(&xencons_lock, flags);
+    tty->driver_data = NULL;
+    if ( xencons_tty == NULL )
+        xencons_tty = tty;
+    __xencons_tx_flush();
+    spin_unlock_irqrestore(&xencons_lock, flags);    
+
+    return 0;
+}
+
+static void xencons_close(struct tty_struct *tty, struct file *filp)
+{
+    unsigned long flags;
+
+    if ( tty->count == 1 )
+    {
+        tty->closing = 1;
+        tty_wait_until_sent(tty, 0);
+        if ( tty->driver->flush_buffer != NULL )
+            tty->driver->flush_buffer(tty);
+        if ( tty->ldisc.flush_buffer != NULL )
+            tty->ldisc.flush_buffer(tty);
+        tty->closing = 0;
+        spin_lock_irqsave(&xencons_lock, flags);
+        xencons_tty = NULL;
+        spin_unlock_irqrestore(&xencons_lock, flags);    
+    }
+
+    MOD_DEC_USE_COUNT;
+}
+
+static struct tty_operations xencons_ops = {
+    .open = xencons_open,
+    .close = xencons_close,
+    .write = xencons_write,
+    .write_room = xencons_write_room,
+    .put_char = xencons_put_char,
+    .flush_chars = xencons_flush_chars,
+    .chars_in_buffer = xencons_chars_in_buffer,
+    .send_xchar = xencons_send_xchar,
+    .flush_buffer = xencons_flush_buffer,
+    .throttle = xencons_throttle,
+    .unthrottle = xencons_unthrottle,
+    .wait_until_sent = xencons_wait_until_sent,
+};
+
+static int __init xencons_init(void)
+{
+    xencons_driver = alloc_tty_driver(NUM_XENCONS); /* XXX */
+    if (!xencons_driver)
+       return -ENOMEM;
+
+    xencons_driver->major           = TTY_MAJOR;
+    xencons_driver->type            = TTY_DRIVER_TYPE_SERIAL;
+    xencons_driver->subtype         = SERIAL_TYPE_NORMAL;
+    xencons_driver->init_termios    = tty_std_termios;
+    xencons_driver->flags           = 
+        TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_NO_DEVFS;
+    xencons_driver->termios         = xencons_termios;
+    xencons_driver->termios_locked  = xencons_termios_locked;
+
+    if ( xc_mode == XC_OFF )
+        return 0;
+
+    if ( xc_mode == XC_SERIAL )
+    {
+        xencons_driver->name        = "ttyS";
+        xencons_driver->minor_start = 64;
+        xencons_driver->num         = 1;
+    }
+    else
+    {
+        xencons_driver->name        = "tty";
+        xencons_driver->minor_start = 1;
+        xencons_driver->num         = MAX_NR_CONSOLES;
+    }
+
+    tty_set_operations(xencons_driver, &xencons_ops);
+
+    if ( tty_register_driver(xencons_driver) )
+        panic("Couldn't register Xen virtual console driver as %s\n",xencons_driver->name);
+
+    if ( start_info.flags & SIF_INITDOMAIN )
+    {
+        xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
+        (void)request_irq(xencons_priv_irq,
+                          xencons_priv_interrupt, 0, "console", NULL);
+    }
+    else
+    {
+        (void)ctrl_if_register_receiver(CMSG_CONSOLE, xencons_rx, 0);
+    }
+
+    printk("Xen virtual console successfully installed as %s\n",xencons_driver->name);
+    
+    return 0;
+}
+
+static void __exit xencons_fini(void)
+{
+    int ret;
+
+    if ( (ret = tty_unregister_driver(xencons_driver)) != 0 )
+        printk(KERN_ERR "Unable to unregister Xen console driver: %d\n", ret);
+
+    if ( start_info.flags & SIF_INITDOMAIN )
+    {
+        free_irq(xencons_priv_irq, NULL);
+        unbind_virq_from_irq(VIRQ_CONSOLE);
+    }
+    else
+    {
+        ctrl_if_unregister_receiver(CMSG_CONSOLE, xencons_rx);
+    }
+}
+
+module_init(xencons_init);
+module_exit(xencons_fini);
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/evtchn/Makefile b/linux-2.6.7-xen-sparse/drivers/xen/evtchn/Makefile
new file mode 100644 (file)
index 0000000..7b082a0
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := evtchn.o
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/evtchn/evtchn.c b/linux-2.6.7-xen-sparse/drivers/xen/evtchn/evtchn.c
new file mode 100644 (file)
index 0000000..81f5a4e
--- /dev/null
@@ -0,0 +1,374 @@
+/******************************************************************************
+ * evtchn.c
+ * 
+ * Xenolinux driver for receiving and demuxing event-channel signals.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/poll.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <asm-xen/evtchn.h>
+
+#if 0
+/* NB. This must be shared amongst drivers if more things go in /dev/xen */
+static devfs_handle_t xen_dev_dir;
+#endif
+
+/* Only one process may open /dev/xen/evtchn at any time. */
+static unsigned long evtchn_dev_inuse;
+
+/* Notification ring, accessed via /dev/xen/evtchn. */
+#define RING_SIZE     2048  /* 2048 16-bit entries */
+#define RING_MASK(_i) ((_i)&(RING_SIZE-1))
+static u16 *ring;
+static unsigned int ring_cons, ring_prod, ring_overflow;
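+/*
+ * As with the console's transmit buffer, ring_cons and ring_prod are
+ * free-running and masked down by RING_MASK() on access; ring_overflow
+ * latches the fact that an upcall found no space left.
+ */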
+
+/* Processes wait on this queue via /dev/xen/evtchn when ring is empty. */
+static DECLARE_WAIT_QUEUE_HEAD(evtchn_wait);
+static struct fasync_struct *evtchn_async_queue;
+
+/* Which ports is user-space bound to? */
+static u32 bound_ports[32];
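+/* One bit per port: 32 x 32 bits, presumably covering NR_EVENT_CHANNELS
+ * (1024) ports. */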
+
+static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+
+void evtchn_device_upcall(int port)
+{
+
+    spin_lock(&lock);
+
+    mask_evtchn(port);
+    clear_evtchn(port);
+
+    if ( ring != NULL )
+    {
+        if ( (ring_prod - ring_cons) < RING_SIZE )
+        {
+            ring[RING_MASK(ring_prod)] = (u16)port;
+            if ( ring_cons == ring_prod++ )
+            {
+                wake_up_interruptible(&evtchn_wait);
+                kill_fasync(&evtchn_async_queue, SIGIO, POLL_IN);
+            }
+        }
+        else
+        {
+            ring_overflow = 1;
+        }
+    }
+
+    spin_unlock(&lock);
+}
+
+static void __evtchn_reset_buffer_ring(void)
+{
+    /* Initialise the ring to empty. Clear errors. */
+    ring_cons = ring_prod = ring_overflow = 0;
+}
+
+static ssize_t evtchn_read(struct file *file, char *buf,
+                           size_t count, loff_t *ppos)
+{
+    int rc;
+    unsigned int c, p, bytes1 = 0, bytes2 = 0;
+    DECLARE_WAITQUEUE(wait, current);
+
+    add_wait_queue(&evtchn_wait, &wait);
+
+    count &= ~1; /* even number of bytes */
+
+    if ( count == 0 )
+    {
+        rc = 0;
+        goto out;
+    }
+
+    if ( count > PAGE_SIZE )
+        count = PAGE_SIZE;
+
+    for ( ; ; )
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+
+        if ( (c = ring_cons) != (p = ring_prod) )
+            break;
+
+        if ( ring_overflow )
+        {
+            rc = -EFBIG;
+            goto out;
+        }
+
+        if ( file->f_flags & O_NONBLOCK )
+        {
+            rc = -EAGAIN;
+            goto out;
+        }
+
+        if ( signal_pending(current) )
+        {
+            rc = -ERESTARTSYS;
+            goto out;
+        }
+
+        schedule();
+    }
+
+    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
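+    /*
+     * Since RING_SIZE is a power of two, bit 11 of the free-running
+     * indices differs exactly when the unread data crosses the end of
+     * the ring, in which case the copy-out must be split in two.
+     */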
+    if ( ((c ^ p) & RING_SIZE) != 0 )
+    {
+        bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
+        bytes2 = RING_MASK(p) * sizeof(u16);
+    }
+    else
+    {
+        bytes1 = (p - c) * sizeof(u16);
+        bytes2 = 0;
+    }
+
+    /* Truncate chunks according to caller's maximum byte count. */
+    if ( bytes1 > count )
+    {
+        bytes1 = count;
+        bytes2 = 0;
+    }
+    else if ( (bytes1 + bytes2) > count )
+    {
+        bytes2 = count - bytes1;
+    }
+
+    if ( copy_to_user(buf, &ring[RING_MASK(c)], bytes1) ||
+         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &ring[0], bytes2)) )
+    {
+        rc = -EFAULT;
+        goto out;
+    }
+
+    ring_cons += (bytes1 + bytes2) / sizeof(u16);
+
+    rc = bytes1 + bytes2;
+
+ out:
+    __set_current_state(TASK_RUNNING);
+    remove_wait_queue(&evtchn_wait, &wait);
+    return rc;
+}
+
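+/*
+ * Ports are delivered through read() already masked (see
+ * evtchn_device_upcall); user space acknowledges a port by writing its
+ * number back, which re-enables (unmasks) delivery.
+ */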
+static ssize_t evtchn_write(struct file *file, const char *buf,
+                            size_t count, loff_t *ppos)
+{
+    int  rc, i;
+    u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
+
+    if ( kbuf == NULL )
+        return -ENOMEM;
+
+    count &= ~1; /* even number of bytes */
+
+    if ( count == 0 )
+    {
+        rc = 0;
+        goto out;
+    }
+
+    if ( count > PAGE_SIZE )
+        count = PAGE_SIZE;
+
+    if ( copy_from_user(kbuf, buf, count) != 0 )
+    {
+        rc = -EFAULT;
+        goto out;
+    }
+
+    spin_lock_irq(&lock);
+    for ( i = 0; i < (count/2); i++ )
+        if ( test_bit(kbuf[i], (unsigned long *)&bound_ports[0]) )
+            unmask_evtchn(kbuf[i]);
+    spin_unlock_irq(&lock);
+
+    rc = count;
+
+ out:
+    free_page((unsigned long)kbuf);
+    return rc;
+}
+
+static int evtchn_ioctl(struct inode *inode, struct file *file,
+                        unsigned int cmd, unsigned long arg)
+{
+    int rc = 0;
+    
+    spin_lock_irq(&lock);
+    
+    switch ( cmd )
+    {
+    case EVTCHN_RESET:
+        __evtchn_reset_buffer_ring();
+        break;
+    case EVTCHN_BIND:
+        if ( !test_and_set_bit(arg, (unsigned long *)&bound_ports[0]) )
+            unmask_evtchn(arg);
+        else
+            rc = -EINVAL;
+        break;
+    case EVTCHN_UNBIND:
+        if ( test_and_clear_bit(arg, (unsigned long *)&bound_ports[0]) )
+            mask_evtchn(arg);
+        else
+            rc = -EINVAL;
+        break;
+    default:
+        rc = -ENOSYS;
+        break;
+    }
+
+    spin_unlock_irq(&lock);   
+
+    return rc;
+}
+
+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+{
+    unsigned int mask = POLLOUT | POLLWRNORM;
+    poll_wait(file, &evtchn_wait, wait);
+    if ( ring_cons != ring_prod )
+        mask |= POLLIN | POLLRDNORM;
+    if ( ring_overflow )
+        mask = POLLERR;
+    return mask;
+}
+
+static int evtchn_fasync(int fd, struct file *filp, int on)
+{
+    return fasync_helper(fd, filp, on, &evtchn_async_queue);
+}
+
+static int evtchn_open(struct inode *inode, struct file *filp)
+{
+    u16 *_ring;
+
+    if ( test_and_set_bit(0, &evtchn_dev_inuse) )
+        return -EBUSY;
+
+    /* Allocate outside locked region so that we can use GFP_KERNEL. */
+    if ( (_ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
+        return -ENOMEM;
+
+    spin_lock_irq(&lock);
+    ring = _ring;
+    __evtchn_reset_buffer_ring();
+    spin_unlock_irq(&lock);
+
+    MOD_INC_USE_COUNT;
+
+    return 0;
+}
+
+static int evtchn_release(struct inode *inode, struct file *filp)
+{
+    int i;
+
+    spin_lock_irq(&lock);
+    if ( ring != NULL )
+    {
+        free_page((unsigned long)ring);
+        ring = NULL;
+    }
+    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
+        if ( test_and_clear_bit(i, (unsigned long *)&bound_ports[0]) )
+            mask_evtchn(i);
+    spin_unlock_irq(&lock);
+
+    evtchn_dev_inuse = 0;
+
+    MOD_DEC_USE_COUNT;
+
+    return 0;
+}
+
+static struct file_operations evtchn_fops = {
+    owner:    THIS_MODULE,
+    read:     evtchn_read,
+    write:    evtchn_write,
+    ioctl:    evtchn_ioctl,
+    poll:     evtchn_poll,
+    fasync:   evtchn_fasync,
+    open:     evtchn_open,
+    release:  evtchn_release
+};
+
+static struct miscdevice evtchn_miscdev = {
+       .minor          = EVTCHN_MINOR,
+       .name           = "evtchn",
+       .devfs_name     = "misc/evtchn",
+       .fops           = &evtchn_fops
+};
+
+static int __init evtchn_init(void)
+{
+#if 0
+    devfs_handle_t symlink_handle;
+    int            err, pos;
+    char           link_dest[64];
+#endif
+    int err;
+
+    /* (DEVFS) create '/dev/misc/evtchn'. */
+    err = misc_register(&evtchn_miscdev);
+    if ( err != 0 )
+    {
+        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+        return err;
+    }
+
+#if 0
+    /* (DEVFS) create directory '/dev/xen'. */
+    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
+
+    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
+    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
+                              &link_dest[3], 
+                              sizeof(link_dest) - 3);
+    if ( pos >= 0 )
+        strncpy(&link_dest[pos], "../", 3);
+
+    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
+    (void)devfs_mk_symlink(xen_dev_dir, 
+                           "evtchn", 
+                           DEVFS_FL_DEFAULT, 
+                           &link_dest[pos],
+                           &symlink_handle, 
+                           NULL);
+
+    /* (DEVFS) automatically destroy the symlink with its destination. */
+    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
+#endif
+
+    printk("Event-channel device installed.\n");
+
+    return 0;
+}
+
+static void evtchn_cleanup(void)
+{
+    misc_deregister(&evtchn_miscdev);
+}
+
+module_init(evtchn_init);
+module_exit(evtchn_cleanup);
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/net/Kconfig b/linux-2.6.7-xen-sparse/drivers/xen/net/Kconfig
new file mode 100644 (file)
index 0000000..334e6c3
--- /dev/null
@@ -0,0 +1,6 @@
+
+config XENNET
+       tristate "Xen network driver"
+       depends on NETDEVICES && ARCH_XEN
+       help
+         Network driver for Xen
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/net/Makefile b/linux-2.6.7-xen-sparse/drivers/xen/net/Makefile
new file mode 100644 (file)
index 0000000..694dd83
--- /dev/null
@@ -0,0 +1,2 @@
+
+obj-y  := network.o
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/net/network.c b/linux-2.6.7-xen-sparse/drivers/xen/net/network.c
new file mode 100644 (file)
index 0000000..d48192f
--- /dev/null
@@ -0,0 +1,869 @@
+/******************************************************************************
+ * network.c
+ * 
+ * Virtual network driver for conversing with remote driver backends.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+
+#include <asm-xen/evtchn.h>
+#include <asm-xen/ctrl_if.h>
+
+#include <asm/page.h>
+
+#include <asm-xen/netif.h>
+
+#define RX_BUF_SIZE ((PAGE_SIZE/2)+1) /* Fool the slab allocator :-) */
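+/*
+ * Requesting just over half a page makes the allocator round the data
+ * area up to a full page, so skb->head comes back page-aligned --
+ * network_alloc_rx_buffers() depends on this and panics otherwise.
+ */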
+
+static void network_tx_buf_gc(struct net_device *dev);
+static void network_alloc_rx_buffers(struct net_device *dev);
+
+static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
+static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
+static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
+
+static struct list_head dev_list;
+
+struct net_private
+{
+    struct list_head list;
+    struct net_device *dev;
+
+    struct net_device_stats stats;
+    NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
+    unsigned int tx_full;
+    
+    netif_tx_interface_t *tx;
+    netif_rx_interface_t *rx;
+
+    spinlock_t   tx_lock;
+    spinlock_t   rx_lock;
+
+    unsigned int handle;
+    unsigned int evtchn;
+    unsigned int irq;
+
+    /* What is the status of our connection to the remote backend? */
+#define BEST_CLOSED       0
+#define BEST_DISCONNECTED 1
+#define BEST_CONNECTED    2
+    unsigned int backend_state;
+
+    /* Is this interface open or closed (down or up)? */
+#define UST_CLOSED        0
+#define UST_OPEN          1
+    unsigned int user_state;
+
+    /*
+     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
+     * array is an index into a chain of free entries.
+     */
+    struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
+    struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
+};
+
+/* Access macros for acquiring and freeing slots in {tx,rx}_skbs[]. */
+#define ADD_ID_TO_FREELIST(_list, _id)             \
+    (_list)[(_id)] = (_list)[0];                   \
+    (_list)[0]     = (void *)(unsigned long)(_id);
+#define GET_ID_FROM_FREELIST(_list)                \
+ ({ unsigned long _id = (unsigned long)(_list)[0]; \
+    (_list)[0]  = (_list)[_id];                    \
+    (unsigned short)_id; })
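+/*
+ * Entry 0 of each array is the free-list head; each free entry stores
+ * the index of the next free entry, cast to look like a pointer.
+ * GET_ID_FROM_FREELIST pops the head; ADD_ID_TO_FREELIST pushes an id
+ * back on.  (network_connect() tells real skb pointers apart from these
+ * small index values by comparing against __PAGE_OFFSET.)
+ */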
+
+static struct net_device *find_dev_by_handle(unsigned int handle)
+{
+    struct list_head *ent;
+    struct net_private *np;
+    list_for_each ( ent, &dev_list )
+    {
+        np = list_entry(ent, struct net_private, list);
+        if ( np->handle == handle )
+            return np->dev;
+    }
+    return NULL;
+}
+
+/** Network interface info. */
+struct netif_ctrl {
+    /** Number of interfaces. */
+    int interface_n;
+    /** Number of connected interfaces. */
+    int connected_n;
+    /** Error code. */
+    int err;
+};
+
+static struct netif_ctrl netctrl;
+
+static void netctrl_init(void)
+{
+    memset(&netctrl, 0, sizeof(netctrl));
+    netctrl.interface_n = -1;
+}
+
+/** Get or set a network interface error.
+ */
+static int netctrl_err(int err)
+{
+    if ( (err < 0) && !netctrl.err )
+    {
+        netctrl.err = err;
+        printk(KERN_WARNING "%s> err=%d\n", __FUNCTION__, err);
+    }
+    return netctrl.err;
+}
+
+/** Test if all network interfaces are connected.
+ *
+ * @return 1 if all connected, 0 if not, negative error code otherwise
+ */
+static int netctrl_connected(void)
+{
+    return netctrl.err ? netctrl.err :
+        (netctrl.connected_n == netctrl.interface_n);
+}
+
+/** Count the connected network interfaces.
+ *
+ * @return connected count
+ */
+static int netctrl_connected_count(void)
+{
+    struct list_head *ent;
+    struct net_private *np;
+    unsigned int connected;
+
+    connected = 0;
+    
+    list_for_each(ent, &dev_list)
+    {
+        np = list_entry(ent, struct net_private, list);
+        if ( np->backend_state == BEST_CONNECTED )
+            connected++;
+    }
+
+    netctrl.connected_n = connected;
+    return connected;
+}
+
+static int network_open(struct net_device *dev)
+{
+    struct net_private *np = dev->priv;
+
+    memset(&np->stats, 0, sizeof(np->stats));
+
+    np->user_state = UST_OPEN;
+
+    network_alloc_rx_buffers(dev);
+    np->rx->event = np->rx_resp_cons + 1;
+
+    netif_start_queue(dev);
+
+    return 0;
+}
+
+
+static void network_tx_buf_gc(struct net_device *dev)
+{
+    NETIF_RING_IDX i, prod;
+    unsigned short id;
+    struct net_private *np = dev->priv;
+    struct sk_buff *skb;
+
+    if ( np->backend_state != BEST_CONNECTED )
+        return;
+
+    do {
+        prod = np->tx->resp_prod;
+
+        for ( i = np->tx_resp_cons; i != prod; i++ )
+        {
+            id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
+            skb = np->tx_skbs[id];
+            ADD_ID_TO_FREELIST(np->tx_skbs, id);
+            dev_kfree_skb_any(skb);
+        }
+        
+        np->tx_resp_cons = prod;
+        
+        /*
+         * Set a new event, then check for race with update of tx_cons. Note
+         * that it is essential to schedule a callback, no matter how few
+         * buffers are pending. Even if there is space in the transmit ring,
+         * higher layers may be blocked because too much data is outstanding:
+         * in such cases notification from Xen is likely to be the only kick
+         * that we'll get.
+         */
+        np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
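+        /* I.e. ask for the next event roughly halfway through the
+         * requests that are currently outstanding. */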
+        mb();
+    }
+    while ( prod != np->tx->resp_prod );
+
+    if ( np->tx_full && 
+         ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
+    {
+        np->tx_full = 0;
+        if ( np->user_state == UST_OPEN )
+            netif_wake_queue(dev);
+    }
+}
+
+
+static void network_alloc_rx_buffers(struct net_device *dev)
+{
+    unsigned short id;
+    struct net_private *np = dev->priv;
+    struct sk_buff *skb;
+    NETIF_RING_IDX i = np->rx->req_prod;
+    int nr_pfns = 0;
+
+    /* Make sure the batch is large enough to be worthwhile (1/2 ring). */
+    if ( unlikely((i - np->rx_resp_cons) > (NETIF_RX_RING_SIZE/2)) || 
+         unlikely(np->backend_state != BEST_CONNECTED) )
+        return;
+
+    do {
+        skb = dev_alloc_skb(RX_BUF_SIZE);
+        if ( unlikely(skb == NULL) )
+            break;
+
+        skb->dev = dev;
+
+        if ( unlikely(((unsigned long)skb->head & (PAGE_SIZE-1)) != 0) )
+            panic("alloc_skb needs to provide us page-aligned buffers.");
+
+        id = GET_ID_FROM_FREELIST(np->rx_skbs);
+
+        np->rx_skbs[id] = skb;
+        
+        np->rx->ring[MASK_NETIF_RX_IDX(i)].req.id = id;
+        
+        rx_pfn_array[nr_pfns] = virt_to_machine(skb->head) >> PAGE_SHIFT;
+
+        rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
+        rx_mcl[nr_pfns].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+        rx_mcl[nr_pfns].args[1] = 0;
+        rx_mcl[nr_pfns].args[2] = 0;
+
+        nr_pfns++;
+    }
+    while ( (++i - np->rx_resp_cons) != NETIF_RX_RING_SIZE );
+
+    /*
+     * We may have allocated buffers which have entries outstanding in the page
+     * update queue -- make sure we flush those first!
+     */
+    flush_page_update_queue();
+
+    /* After all PTEs have been zapped we blow away stale TLB entries. */
+    rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;
+
+    /* Give away a batch of pages. */
+    rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
+    rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
+    rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
+    rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;
+
+    /* Zap PTEs and give away pages in one big multicall. */
+    (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);
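+    /*
+     * The hypervisor presumably writes each call's return value back
+     * into its multicall entry (args[5] here); dom_mem_op returns the
+     * number of extents actually processed.
+     */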
+
+    /* Check return status of HYPERVISOR_dom_mem_op(). */
+    if ( rx_mcl[nr_pfns].args[5] != nr_pfns )
+        panic("Unable to reduce memory reservation\n");
+
+    np->rx->req_prod = i;
+}
+
+
+static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+    unsigned short id;
+    struct net_private *np = (struct net_private *)dev->priv;
+    netif_tx_request_t *tx;
+    NETIF_RING_IDX i;
+
+    if ( unlikely(np->tx_full) )
+    {
+        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
+        netif_stop_queue(dev);
+        return -ENOBUFS;
+    }
+
+    if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
+                  PAGE_SIZE) )
+    {
+        struct sk_buff *new_skb = dev_alloc_skb(RX_BUF_SIZE);
+        if ( unlikely(new_skb == NULL) )
+            return 1;
+        skb_put(new_skb, skb->len);
+        memcpy(new_skb->data, skb->data, skb->len);
+        dev_kfree_skb(skb);
+        skb = new_skb;
+    }
+    
+    spin_lock_irq(&np->tx_lock);
+
+    if ( np->backend_state != BEST_CONNECTED )
+    {
+        spin_unlock_irq(&np->tx_lock);
+        return 1;
+    }
+
+    i = np->tx->req_prod;
+
+    id = GET_ID_FROM_FREELIST(np->tx_skbs);
+    np->tx_skbs[id] = skb;
+
+    tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
+
+    tx->id   = id;
+    tx->addr = virt_to_machine(skb->data);
+    tx->size = skb->len;
+
+    wmb();
+    np->tx->req_prod = i + 1;
+
+    network_tx_buf_gc(dev);
+
+    if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
+    {
+        np->tx_full = 1;
+        netif_stop_queue(dev);
+    }
+
+    spin_unlock_irq(&np->tx_lock);
+
+    np->stats.tx_bytes += skb->len;
+    np->stats.tx_packets++;
+
+    /* Only notify Xen if there are no outstanding responses. */
+    mb();
+    if ( np->tx->resp_prod == i )
+        notify_via_evtchn(np->evtchn);
+
+    return 0;
+}
+
+
+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+    struct net_device *dev = dev_id;
+    struct net_private *np = dev->priv;
+    unsigned long flags;
+
+    spin_lock_irqsave(&np->tx_lock, flags);
+    network_tx_buf_gc(dev);
+    spin_unlock_irqrestore(&np->tx_lock, flags);
+
+    if ( (np->rx_resp_cons != np->rx->resp_prod) &&
+         (np->user_state == UST_OPEN) )
+        netif_rx_schedule(dev);
+
+    return IRQ_HANDLED;
+}
+
+
+static int netif_poll(struct net_device *dev, int *pbudget)
+{
+    struct net_private *np = dev->priv;
+    struct sk_buff *skb;
+    netif_rx_response_t *rx;
+    NETIF_RING_IDX i;
+    mmu_update_t *mmu = rx_mmu;
+    multicall_entry_t *mcl = rx_mcl;
+    int work_done, budget, more_to_do = 1;
+    struct sk_buff_head rxq;
+    unsigned long flags;
+
+    spin_lock(&np->rx_lock);
+
+    if ( np->backend_state != BEST_CONNECTED )
+    {
+        spin_unlock(&np->rx_lock);
+        return 0;
+    }
+
+    skb_queue_head_init(&rxq);
+
+    if ( (budget = *pbudget) > dev->quota )
+        budget = dev->quota;
+
+    for ( i = np->rx_resp_cons, work_done = 0; 
+          (i != np->rx->resp_prod) && (work_done < budget); 
+          i++, work_done++ )
+    {
+        rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+
+        skb = np->rx_skbs[rx->id];
+        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
+
+        if ( unlikely(rx->status <= 0) )
+        {
+            /* Gate this error. We get a (valid) slew of them on suspend. */
+            if ( np->user_state != UST_OPEN )
+                printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
+            dev_kfree_skb(skb);
+            continue;
+        }
+
+        skb->data = skb->tail = skb->head + (rx->addr & ~PAGE_MASK);
+        skb_put(skb, rx->status);
+
+        np->stats.rx_packets++;
+        np->stats.rx_bytes += rx->status;
+
+        /* Remap the page. */
+        mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
+        mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
+        mmu++;
+        mcl->op = __HYPERVISOR_update_va_mapping;
+        mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+        mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
+        mcl->args[2] = 0;
+        mcl++;
+
+        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
+            rx->addr >> PAGE_SHIFT;
+
+        __skb_queue_tail(&rxq, skb);
+    }
+
+    /* Do all the remapping work, and M->P updates, in one big hypercall. */
+    if ( likely((mcl - rx_mcl) != 0) )
+    {
+        mcl->op = __HYPERVISOR_mmu_update;
+        mcl->args[0] = (unsigned long)rx_mmu;
+        mcl->args[1] = mmu - rx_mmu;
+        mcl->args[2] = 0;
+        mcl++;
+        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+    }
+
+    while ( (skb = __skb_dequeue(&rxq)) != NULL )
+    {
+        /* Set the shared-info area, which is hidden behind the real data. */
+        atomic_set(&(skb_shinfo(skb)->dataref), 1);
+        skb_shinfo(skb)->nr_frags = 0;
+        skb_shinfo(skb)->frag_list = NULL;
+
+        /* Ethernet-specific work. Delayed to here as it peeks the header. */
+        skb->protocol = eth_type_trans(skb, dev);
+
+        /* Pass it up. */
+        netif_rx(skb);
+        dev->last_rx = jiffies;
+    }
+
+    np->rx_resp_cons = i;
+
+    network_alloc_rx_buffers(dev);
+
+    *pbudget   -= work_done;
+    dev->quota -= work_done;
+
+    if ( work_done < budget )
+    {
+        local_irq_save(flags);
+
+        np->rx->event = i + 1;
+    
+        /* Deal with hypervisor racing our resetting of rx->event. */
+        mb();
+        if ( np->rx->resp_prod == i )
+        {
+            __netif_rx_complete(dev);
+            more_to_do = 0;
+        }
+
+        local_irq_restore(flags);
+    }
+
+    spin_unlock(&np->rx_lock);
+
+    return more_to_do;
+}
+
+
+static int network_close(struct net_device *dev)
+{
+    struct net_private *np = dev->priv;
+    np->user_state = UST_CLOSED;
+    netif_stop_queue(np->dev);
+    return 0;
+}
+
+
+static struct net_device_stats *network_get_stats(struct net_device *dev)
+{
+    struct net_private *np = (struct net_private *)dev->priv;
+    return &np->stats;
+}
+
+
+static void network_connect(struct net_device *dev,
+                            netif_fe_interface_status_changed_t *status)
+{
+    struct net_private *np;
+    int i, requeue_idx;
+    netif_tx_request_t *tx;
+
+    np = dev->priv;
+    spin_lock_irq(&np->rx_lock);
+    spin_lock(&np->tx_lock);
+
+    /* Recovery procedure: */
+
+    /* Step 1: Reinitialise variables. */
+    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
+    np->rx->event = 1;
+
+    /* Step 2: Rebuild the RX and TX ring contents.
+     * NB. We could just free the queued TX packets now but we hope
+     * that sending them out might do some good.  We have to rebuild
+     * the RX ring because some of our pages are currently flipped out
+     * so we can't just free the RX skbs.
+     * NB2. Freelist index entries are always going to be less than
+     * __PAGE_OFFSET, whereas pointers to skbs will always be equal to or
+     * greater than __PAGE_OFFSET: we use this property to distinguish
+     * them.
+     */
+
+    /* Rebuild the TX buffer freelist and the TX ring itself.
+     * NB. This reorders packets.  We could keep more private state
+     * to avoid this but maybe it doesn't matter so much given the
+     * interface has been down.
+     */
+    for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
+    {
+        if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
+        {
+            struct sk_buff *skb = np->tx_skbs[i];
+
+            tx = &np->tx->ring[requeue_idx++].req;
+
+            tx->id   = i;
+            tx->addr = virt_to_machine(skb->data);
+            tx->size = skb->len;
+
+            np->stats.tx_bytes += skb->len;
+            np->stats.tx_packets++;
+        }
+    }
+    wmb();
+    np->tx->req_prod = requeue_idx;
+
+    /* Rebuild the RX buffer freelist and the RX ring itself. */
+    for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
+        if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
+            np->rx->ring[requeue_idx++].req.id = i;
+    wmb();
+    np->rx->req_prod = requeue_idx;
+
+    /* Step 3: All public and private state should now be sane.  Get
+     * ready to start sending and receiving packets and give the driver
+     * domain a kick because we've probably just requeued some
+     * packets.
+     */
+    np->backend_state = BEST_CONNECTED;
+    notify_via_evtchn(status->evtchn);  
+    network_tx_buf_gc(dev);
+
+    if ( np->user_state == UST_OPEN )
+        netif_start_queue(dev);
+
+    spin_unlock(&np->tx_lock);
+    spin_unlock_irq(&np->rx_lock);
+}
+
+static void netif_status_change(netif_fe_interface_status_changed_t *status)
+{
+    ctrl_msg_t                   cmsg;
+    netif_fe_interface_connect_t up;
+    struct net_device *dev;
+    struct net_private *np;
+    
+    if ( netctrl.interface_n <= 0 )
+    {
+        printk(KERN_WARNING "Status change: no interfaces\n");
+        return;
+    }
+
+    dev = find_dev_by_handle(status->handle);
+    if ( dev == NULL )
+    {
+        printk(KERN_WARNING "Status change: invalid netif handle %u\n",
+               status->handle);
+        return;
+    }
+    np  = dev->priv;
+    
+    switch ( status->status )
+    {
+    case NETIF_INTERFACE_STATUS_DESTROYED:
+        printk(KERN_WARNING "Unexpected netif-DESTROYED message in state %d\n",
+               np->backend_state);
+        break;
+
+    case NETIF_INTERFACE_STATUS_DISCONNECTED:
+        if ( np->backend_state != BEST_CLOSED )
+        {
+            printk(KERN_WARNING "Unexpected netif-DISCONNECTED message"
+                   " in state %d\n", np->backend_state);
+            printk(KERN_INFO "Attempting to reconnect network interface\n");
+
+            /* Begin interface recovery.
+             *
+             * NB. Whilst we're recovering, we turn the carrier state off.  We
+             * take measures to ensure that this device isn't used for
+             * anything.  We also stop the queue for this device.  Various
+             * different approaches (e.g. continuing to buffer packets) have
+             * been tested but don't appear to improve the overall impact on
+             * TCP connections.
+             *
+             * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
+             * is initiated by a special "RESET" message - disconnect could
+             * just mean we're not allowed to use this interface any more.
+             */
+
+            /* Stop old i/f to prevent errors whilst we rebuild the state. */
+            spin_lock_irq(&np->tx_lock);
+            spin_lock(&np->rx_lock);
+            netif_stop_queue(dev);
+            np->backend_state = BEST_DISCONNECTED;
+            spin_unlock(&np->rx_lock);
+            spin_unlock_irq(&np->tx_lock);
+
+            /* Free resources. */
+            free_irq(np->irq, dev);
+            unbind_evtchn_from_irq(np->evtchn);
+            free_page((unsigned long)np->tx);
+            free_page((unsigned long)np->rx);
+        }
+
+        /* Move from CLOSED to DISCONNECTED state. */
+        np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
+        np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
+        memset(np->tx, 0, PAGE_SIZE);
+        memset(np->rx, 0, PAGE_SIZE);
+        np->backend_state = BEST_DISCONNECTED;
+
+        /* Construct an interface-CONNECT message for the domain controller. */
+        cmsg.type      = CMSG_NETIF_FE;
+        cmsg.subtype   = CMSG_NETIF_FE_INTERFACE_CONNECT;
+        cmsg.length    = sizeof(netif_fe_interface_connect_t);
+        up.handle      = status->handle;
+        up.tx_shmem_frame = virt_to_machine(np->tx) >> PAGE_SHIFT;
+        up.rx_shmem_frame = virt_to_machine(np->rx) >> PAGE_SHIFT;
+        memcpy(cmsg.msg, &up, sizeof(up));
+        
+        /* Tell the controller to bring up the interface. */
+        ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+        break;
+
+    case NETIF_INTERFACE_STATUS_CONNECTED:
+        if ( np->backend_state == BEST_CLOSED )
+        {
+            printk(KERN_WARNING "Unexpected netif-CONNECTED message"
+                   " in state %d\n", np->backend_state);
+            break;
+        }
+
+        memcpy(dev->dev_addr, status->mac, ETH_ALEN);
+
+        network_connect(dev, status);
+
+        np->evtchn = status->evtchn;
+        np->irq = bind_evtchn_to_irq(np->evtchn);
+        (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, 
+                          dev->name, dev);
+        
+        netctrl_connected_count();
+        break;
+
+    default:
+        printk(KERN_WARNING "Status change to unknown value %d\n", 
+               status->status);
+        break;
+    }
+}
+
+/** Create a network device.
+ * @param handle device handle
+ * @param val return parameter for created device
+ * @return 0 on success, error code otherwise
+ */
+static int create_netdev(int handle, struct net_device **val)
+{
+    int i, err = 0;
+    struct net_device *dev = NULL;
+    struct net_private *np = NULL;
+
+    if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
+    {
+        printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
+        err = -ENOMEM;
+        goto exit;
+    }
+
+    np                = dev->priv;
+    np->backend_state = BEST_CLOSED;
+    np->user_state    = UST_CLOSED;
+    np->handle        = handle;
+    
+    spin_lock_init(&np->tx_lock);
+    spin_lock_init(&np->rx_lock);
+
+    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
+    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
+        np->tx_skbs[i] = (void *)(i+1);
+    for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
+        np->rx_skbs[i] = (void *)(i+1);
+
+    dev->open            = network_open;
+    dev->hard_start_xmit = network_start_xmit;
+    dev->stop            = network_close;
+    dev->get_stats       = network_get_stats;
+    dev->poll            = netif_poll;
+    dev->weight          = 64;
+    
+    if ( (err = register_netdev(dev)) != 0 )
+    {
+        printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
+        goto exit;
+    }
+    np->dev = dev;
+    list_add(&np->list, &dev_list);
+
+  exit:
+    if ( (err != 0) && (dev != NULL) )
+        free_netdev(dev); /* NB. alloc_etherdev() aligns dev: kfree() would free the wrong pointer. */
+    else if ( val != NULL )
+        *val = dev;
+    return err;
+}
+
+/*
+ * Initialize the network control interface. Set the number of network devices
+ * and create them.
+ */
+static void netif_driver_status_change(
+    netif_fe_driver_status_changed_t *status)
+{
+    int err = 0;
+    int i;
+    
+    netctrl.interface_n = status->nr_interfaces;
+    netctrl.connected_n = 0;
+
+    for ( i = 0; i < netctrl.interface_n; i++ )
+    {
+        if ( (err = create_netdev(i, NULL)) != 0 )
+        {
+            netctrl_err(err);
+            break;
+        }
+    }
+}
+
+static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+    int respond = 1;
+
+    switch ( msg->subtype )
+    {
+    case CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED:
+        if ( msg->length != sizeof(netif_fe_interface_status_changed_t) )
+            goto error;
+        netif_status_change((netif_fe_interface_status_changed_t *)
+                            &msg->msg[0]);
+        break;
+
+    case CMSG_NETIF_FE_DRIVER_STATUS_CHANGED:
+        if ( msg->length != sizeof(netif_fe_driver_status_changed_t) )
+            goto error;
+        netif_driver_status_change((netif_fe_driver_status_changed_t *)
+                                   &msg->msg[0]);
+        /* Message is a response */
+        respond = 0;
+        break;
+
+    error:
+    default:
+        msg->length = 0;
+        break;
+    }
+
+    if ( respond )
+        ctrl_if_send_response(msg);
+}
+
+
+static int __init netif_init(void)
+{
+    ctrl_msg_t                       cmsg;
+    netif_fe_driver_status_changed_t st;
+    int err = 0, wait_i, wait_n = 20;
+
+    if ( (start_info.flags & SIF_INITDOMAIN) ||
+         (start_info.flags & SIF_NET_BE_DOMAIN) )
+        return 0;
+
+    printk("Initialising Xen virtual ethernet frontend driver");
+
+    INIT_LIST_HEAD(&dev_list);
+
+    netctrl_init();
+
+    (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
+                                    CALLBACK_IN_BLOCKING_CONTEXT);
+
+    /* Send a driver-UP notification to the domain controller. */
+    cmsg.type      = CMSG_NETIF_FE;
+    cmsg.subtype   = CMSG_NETIF_FE_DRIVER_STATUS_CHANGED;
+    cmsg.length    = sizeof(netif_fe_driver_status_changed_t);
+    st.status      = NETIF_DRIVER_STATUS_UP;
+    st.nr_interfaces = 0;
+    memcpy(cmsg.msg, &st, sizeof(st));
+    ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+
+    /* Wait for all interfaces to be connected. */
+    for ( wait_i = 0; ; wait_i++)
+    {
+        if ( (err = (wait_i < wait_n) ? netctrl_connected() : -ENETDOWN) != 0 )
+        {
+            err = (err > 0) ? 0 : err;
+            break;
+        }
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+    }
+
+    if ( err )
+        ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
+
+    return err;
+}
+
+__initcall(netif_init);
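
The transmit and receive paths above both follow the same shared-ring discipline: publish the request with a write barrier before bumping the producer index, then re-check the peer's producer after a full barrier to decide whether an event-channel kick is needed. The sketch below models just that producer side; the types and the barrier() stand-in are simplifications for illustration, not the driver's actual netif ring structures (those live in the hypervisor interface headers).

    /* Illustrative model of the frontend's ring-producer protocol. */
    #define RING_SIZE 256                     /* assumed power of two */
    #define MASK(i)   ((i) & (RING_SIZE - 1))

    /* Stand-in for the kernel's wmb()/mb(); on i386 a compiler barrier
     * is sufficient to order ordinary stores for this illustration. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    struct ring {
        volatile unsigned int req_prod;       /* written by this end      */
        volatile unsigned int resp_prod;      /* written by the other end */
        unsigned long slot[RING_SIZE];
    };

    static void ring_produce(struct ring *r, unsigned long req,
                             void (*kick)(void))
    {
        unsigned int i = r->req_prod;

        r->slot[MASK(i)] = req;
        barrier();                 /* request visible before the index */
        r->req_prod = i + 1;

        barrier();                 /* index visible before re-reading  */
        if (r->resp_prod == i)     /* peer had caught up and may sleep */
            kick();                /* so send the event-channel notify */
    }
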
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/desc.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/desc.h
new file mode 100644 (file)
index 0000000..b36aaa2
--- /dev/null
@@ -0,0 +1,133 @@
+#ifndef __ARCH_DESC_H
+#define __ARCH_DESC_H
+
+#include <asm/ldt.h>
+#include <asm/segment.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/preempt.h>
+#include <linux/smp.h>
+
+#include <asm/mmu.h>
+
+extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+
+struct Xgt_desc_struct {
+       unsigned short size;
+       unsigned long address __attribute__((packed));
+       unsigned short pad;
+} __attribute__ ((packed));
+
+extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
+
+#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
+
+#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)cpu_gdt_descr[(_cpu)].address)
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
+       "movw %%ax,2(%2)\n\t" \
+       "rorl $16,%%eax\n\t" \
+       "movb %%al,4(%2)\n\t" \
+       "movb %4,5(%2)\n\t" \
+       "movb $0,6(%2)\n\t" \
+       "movb %%ah,7(%2)\n\t" \
+       "rorl $16,%%eax" \
+       : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+{
+       _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry],
+           (int)addr, 235, 0x89);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+{
+       _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
+           (int)addr, ((size << 3)-1), 0x82);
+}
+
+#define LDT_entry_a(info) \
+       ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info) \
+       (((info)->base_addr & 0xff000000) | \
+       (((info)->base_addr & 0x00ff0000) >> 16) | \
+       ((info)->limit & 0xf0000) | \
+       (((info)->read_exec_only ^ 1) << 9) | \
+       ((info)->contents << 10) | \
+       (((info)->seg_not_present ^ 1) << 15) | \
+       ((info)->seg_32bit << 22) | \
+       ((info)->limit_in_pages << 23) | \
+       ((info)->useable << 20) | \
+       0x7000)
+
+#define LDT_empty(info) (\
+       (info)->base_addr       == 0    && \
+       (info)->limit           == 0    && \
+       (info)->contents        == 0    && \
+       (info)->read_exec_only  == 1    && \
+       (info)->seg_32bit       == 0    && \
+       (info)->limit_in_pages  == 0    && \
+       (info)->seg_not_present == 1    && \
+       (info)->useable         == 0    )
+
+#if TLS_SIZE != 24
+# error update this code.
+#endif
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
+       C(0); C(1); C(2);
+#undef C
+}
+
+static inline void clear_LDT(void)
+{
+       int cpu = get_cpu();
+
+       /*
+        * NB. We load the default_ldt for lcall7/27 handling on demand, as
+        * it slows down context switching. No one uses it anyway.
+        */
+       cpu = cpu;              /* XXX avoid compiler warning */
+       queue_set_ldt(0UL, 0);
+       put_cpu();
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+{
+       void *segments = pc->ldt;
+       int count = pc->size;
+
+       if (likely(!count))
+               segments = NULL;
+
+       queue_set_ldt((unsigned long)segments, count);
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+       int cpu = get_cpu();
+       load_LDT_nolock(pc, cpu);
+       put_cpu();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
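
Under Xen the GDT pages are validated by the hypervisor and mapped read-only, so load_TLS() cannot write descriptors directly; it hands HYPERVISOR_update_descriptor() the two 32-bit words of each entry. Below is a sketch of packing those words, following the bit layout of the LDT_entry_a()/LDT_entry_b() macros above; the struct is a hypothetical stand-in for struct user_desc.

    /* Hypothetical mirror of the user_desc fields the macros consume. */
    struct seg_info {
        unsigned int base_addr;
        unsigned int limit;
        unsigned int seg_32bit:1;
        unsigned int contents:2;
        unsigned int read_exec_only:1;
        unsigned int limit_in_pages:1;
        unsigned int seg_not_present:1;
        unsigned int useable:1;
    };

    static void pack_descriptor(const struct seg_info *info,
                                unsigned long *word1, unsigned long *word2)
    {
        /* Word 1: base[15:0] in the top half, limit[15:0] in the bottom. */
        *word1 = ((info->base_addr & 0x0000ffff) << 16) |
                 (info->limit & 0x0ffff);

        /* Word 2: remaining base/limit bits plus attributes.  0x7000 sets
         * S=1 (code/data segment) and DPL=3; the present bit comes from
         * the inverted seg_not_present flag, as in LDT_entry_b(). */
        *word2 = (info->base_addr & 0xff000000)         |
                 ((info->base_addr & 0x00ff0000) >> 16) |
                 (info->limit & 0xf0000)                |
                 ((info->read_exec_only ^ 1) << 9)      |
                 (info->contents << 10)                 |
                 ((info->seg_not_present ^ 1) << 15)    |
                 (info->useable << 20)                  |
                 (info->seg_32bit << 22)                |
                 (info->limit_in_pages << 23)           |
                 0x7000;
    }
    /* The pair would then be passed to HYPERVISOR_update_descriptor()
     * together with the machine address of the GDT/LDT slot to update. */
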
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/e820.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/e820.h
new file mode 100644 (file)
index 0000000..c4cae27
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * structures and definitions for the int 15, ax=e820 memory map
+ * scheme.
+ *
+ * In a nutshell, arch/i386/boot/setup.S populates a scratch table
+ * in the empty_zero_block that contains a list of usable address/size
+ * duples.   In arch/i386/kernel/setup.c, this information is
+ * transferred into the e820map, and in arch/i386/mm/init.c, that
+ * new information is used to mark pages reserved or not.
+ *
+ */
+#ifndef __E820_HEADER
+#define __E820_HEADER
+
+#define E820MAP        0x2d0           /* our map */
+#define E820MAX        32              /* number of entries in E820MAP */
+#define E820NR 0x1e8           /* # entries in E820MAP */
+
+#define E820_RAM       1
+#define E820_RESERVED  2
+#define E820_ACPI      3 /* usable as RAM once ACPI tables have been read */
+#define E820_NVS       4
+
+#define HIGH_MEMORY    (0)
+
+#ifndef __ASSEMBLY__
+
+struct e820map {
+    int nr_map;
+    struct e820entry {
+       unsigned long long addr;        /* start of memory segment */
+       unsigned long long size;        /* size of memory segment */
+       unsigned long type;             /* type of memory segment */
+    } map[E820MAX];
+};
+
+extern struct e820map e820;
+#endif/*!__ASSEMBLY__*/
+
+#endif/*__E820_HEADER*/
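
The map declared here is consumed by simple linear walks; for reference, a minimal sketch of totalling usable RAM, the kind of pass arch/i386/kernel/setup.c makes when sizing memory (the function name is illustrative, not part of this header):

    /* Sketch: sum the E820_RAM regions of a populated e820 map. */
    static unsigned long long e820_total_ram(const struct e820map *map)
    {
        unsigned long long total = 0;
        int i;

        for (i = 0; i < map->nr_map; i++)
            if (map->map[i].type == E820_RAM)
                total += map->map[i].size;

        return total;
    }
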
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/fixmap.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/fixmap.h
new file mode 100644 (file)
index 0000000..33a1c2d
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/acpi.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+       FIX_HOLE,
+       FIX_VSYSCALL,
+#ifdef CONFIG_X86_LOCAL_APIC
+       FIX_APIC_BASE,  /* local (CPU) APIC -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+       FIX_IO_APIC_BASE_0,
+       FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+#endif
+#ifdef CONFIG_X86_VISWS_APIC
+       FIX_CO_CPU,     /* Cobalt timer */
+       FIX_CO_APIC,    /* Cobalt APIC Redirection Table */ 
+       FIX_LI_PCIA,    /* Lithium PCI Bridge A */
+       FIX_LI_PCIB,    /* Lithium PCI Bridge B */
+#endif
+#ifdef CONFIG_X86_F00F_BUG
+       FIX_F00F_IDT,   /* Virtual mapping for IDT */
+#endif
+#ifdef CONFIG_X86_CYCLONE_TIMER
+       FIX_CYCLONE_TIMER, /*cyclone timer register*/
+#endif 
+#ifdef CONFIG_HIGHMEM
+       FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+       FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+#ifdef CONFIG_ACPI_BOOT
+       FIX_ACPI_BEGIN,
+       FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
+#ifdef CONFIG_PCI_MMCONFIG
+       FIX_PCIE_MCFG,
+#endif
+       FIX_SHARED_INFO,
+       __end_of_permanent_fixed_addresses,
+       /* temporary boot-time mappings, used before ioremap() is functional */
+#define NR_FIX_BTMAPS  16
+       FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+       FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+       FIX_WP_TEST,
+       __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+                                       unsigned long phys, pgprot_t flags);
+extern void __set_fixmap_ma (enum fixed_addresses idx,
+                                       unsigned long mach, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+               __set_fixmap(idx, phys, PAGE_KERNEL)
+#define set_fixmap_ma(idx, phys) \
+               __set_fixmap_ma(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+               __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+               __set_fixmap(idx, 0, __pgprot(0))
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+#define FIXADDR_TOP    (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
+#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START  (FIXADDR_TOP - __FIXADDR_SIZE)
+
+#define __fix_to_virt(x)       (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)       ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * This is the range that is readable by user mode, and things
+ * acting like user mode such as get_user_pages.
+ */
+#define FIXADDR_USER_START     (__fix_to_virt(FIX_VSYSCALL))
+#define FIXADDR_USER_END       (FIXADDR_USER_START + PAGE_SIZE)
+
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static inline unsigned long fix_to_virt(const unsigned int idx)
+{
+       /*
+        * this branch gets completely eliminated after inlining,
+        * except when someone tries to use fixaddr indices in an
+        * illegal way. (such as mixing up address types or using
+        * out-of-range indices).
+        *
+        * If it doesn't get removed, the linker will complain
+        * loudly with a reasonably clear error message.
+        */
+       if (idx >= __end_of_fixed_addresses)
+               __this_fixmap_does_not_exist();
+
+       return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+       BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+       return __virt_to_fix(vaddr);
+}
+
+#endif
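
The arithmetic in __fix_to_virt() counts pages downward from FIXADDR_TOP, so index 0 is the highest page and each later index lands one page lower. A small worked sketch, with an assumed FIXADDR_TOP value since the real one depends on HYPERVISOR_VIRT_START:

    /* Worked example of the fixmap index arithmetic (values assumed). */
    #define EX_PAGE_SHIFT  12
    #define EX_FIXADDR_TOP 0xfbffe000UL   /* e.g. 0xfc000000 - 2 pages */

    static unsigned long ex_fix_to_virt(unsigned int idx)
    {
        return EX_FIXADDR_TOP - (idx << EX_PAGE_SHIFT);
    }

    /* ex_fix_to_virt(0) == EX_FIXADDR_TOP; each successive index is one
     * page lower, and virt_to_fix() inverts the same calculation. */
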
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/hypervisor.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/hypervisor.h
new file mode 100644 (file)
index 0000000..ff65b7e
--- /dev/null
@@ -0,0 +1,455 @@
+/******************************************************************************
+ * hypervisor.h
+ * 
+ * Linux-specific hypervisor handling.
+ * 
+ * Copyright (c) 2002, K A Fraser
+ */
+
+#ifndef __HYPERVISOR_H__
+#define __HYPERVISOR_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+#include <asm/hypervisor-ifs/dom0_ops.h>
+#include <asm-xen/domain_controller.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm-xen/xen.h>
+
+/* arch/xen/i386/kernel/setup.c */
+union start_info_union
+{
+    extended_start_info_t start_info;
+    char padding[512];
+};
+extern union start_info_union start_info_union;
+#define start_info (start_info_union.start_info)
+
+/* arch/xen/i386/kernel/hypervisor.c */
+void do_hypervisor_callback(struct pt_regs *regs);
+
+/* arch/xen/i386/mm/init.c */
+void wrprotect_bootpt(pgd_t *, void *, int);
+
+/* arch/xen/i386/kernel/head.S */
+void lgdt_finish(void);
+
+/* arch/xen/i386/mm/hypervisor.c */
+/*
+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
+ * be MACHINE addresses.
+ */
+
+extern unsigned int mmu_update_queue_idx;
+
+void queue_l1_entry_update(pte_t *ptr, unsigned long val);
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
+void queue_pt_switch(unsigned long ptr);
+void queue_tlb_flush(void);
+void queue_invlpg(unsigned long ptr);
+void queue_pgd_pin(unsigned long ptr);
+void queue_pgd_unpin(unsigned long ptr);
+void queue_pte_pin(unsigned long ptr);
+void queue_pte_unpin(unsigned long ptr);
+void queue_set_ldt(unsigned long ptr, unsigned long bytes);
+void queue_machphys_update(unsigned long mfn, unsigned long pfn);
+#define MMU_UPDATE_DEBUG 0
+
+#if MMU_UPDATE_DEBUG > 0
+typedef struct {
+    void *ptr;
+    unsigned long val, pteval;
+    void *ptep;
+    int line; char *file;
+} page_update_debug_t;
+extern page_update_debug_t update_debug_queue[];
+#define queue_l1_entry_update(_p,_v) ({                           \
+ update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
+ update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
+ queue_l1_entry_update((_p),(_v));                                \
+})
+#define queue_l2_entry_update(_p,_v) ({                           \
+ update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
+ update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
+ queue_l2_entry_update((_p),(_v));                                \
+})
+#endif
+
+#if MMU_UPDATE_DEBUG > 1
+#if MMU_UPDATE_DEBUG > 2
+#undef queue_l1_entry_update
+#define queue_l1_entry_update(_p,_v) ({                           \
+ update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
+ update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
+ printk("L1 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
+        (_p), virt_to_machine(_p), pte_val(*(_p)),                 \
+        (unsigned long)(_v));                                     \
+ queue_l1_entry_update((_p),(_v));                                \
+})
+#endif
+#undef queue_l2_entry_update
+#define queue_l2_entry_update(_p,_v) ({                           \
+ update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
+ update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
+ update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
+ update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
+ printk("L2 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
+        (_p), virt_to_machine(_p), pmd_val(*_p),                  \
+        (unsigned long)(_v));                                     \
+ queue_l2_entry_update((_p),(_v));                                \
+})
+#define queue_pt_switch(_p) ({                                    \
+ printk("PTSWITCH %s %d: %08lx\n", __FILE__, __LINE__, (_p));     \
+ queue_pt_switch(_p);                                             \
+})   
+#define queue_tlb_flush() ({                                      \
+ printk("TLB FLUSH %s %d\n", __FILE__, __LINE__);                 \
+ queue_tlb_flush();                                               \
+})   
+#define queue_invlpg(_p) ({                                       \
+ printk("INVLPG %s %d: %08lx\n", __FILE__, __LINE__, (_p));       \
+ queue_invlpg(_p);                                                \
+})   
+#define queue_pgd_pin(_p) ({                                      \
+ printk("PGD PIN %s %d: %08lx/%08lx\n", __FILE__, __LINE__, (_p), \
+       phys_to_machine(_p));                                     \
+ queue_pgd_pin(_p);                                               \
+})   
+#define queue_pgd_unpin(_p) ({                                    \
+ printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
+ queue_pgd_unpin(_p);                                             \
+})   
+#define queue_pte_pin(_p) ({                                      \
+ printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));      \
+ queue_pte_pin(_p);                                               \
+})   
+#define queue_pte_unpin(_p) ({                                    \
+ printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
+ queue_pte_unpin(_p);                                             \
+})   
+#define queue_set_ldt(_p,_l) ({                                        \
+ printk("SETL LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
+ queue_set_ldt((_p), (_l));                                            \
+})   
+#endif
+
+void _flush_page_update_queue(void);
+static inline int flush_page_update_queue(void)
+{
+    unsigned int idx = mmu_update_queue_idx;
+    if ( idx != 0 ) _flush_page_update_queue();
+    return idx;
+}
+#define xen_flush_page_update_queue() (_flush_page_update_queue())
+void MULTICALL_flush_page_update_queue(void);
+
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+
+static inline int HYPERVISOR_set_trap_table(trap_info_t *table)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
+        "b" (table) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_mmu_update(mmu_update_t *req, int count,
+                                       int *success_count)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 
+        "b" (req), "c" (count), "d" (success_count) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_set_gdt), 
+        "b" (frame_list), "c" (entries) : "memory" );
+
+
+    return ret;
+}
+
+static inline int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
+        "b" (ss), "c" (esp) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_set_callbacks(
+    unsigned long event_selector, unsigned long event_address,
+    unsigned long failsafe_selector, unsigned long failsafe_address)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
+        "b" (event_selector), "c" (event_address), 
+        "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_fpu_taskswitch(void)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_yield(void)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
+        "b" (SCHEDOP_yield) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_block(void)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
+        "b" (SCHEDOP_block) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_shutdown(void)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
+        "b" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
+        : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_reboot(void)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
+        "b" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
+        : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_suspend(unsigned long srec)
+{
+    int ret;
+    /* NB. On suspend, control software expects a suspend record in %esi. */
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
+        "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
+        "S" (srec) : "memory" );
+
+    return ret;
+}
+
+static inline long HYPERVISOR_set_timer_op(u64 timeout)
+{
+    int ret;
+    unsigned long timeout_hi = (unsigned long)(timeout>>32);
+    unsigned long timeout_lo = (unsigned long)timeout;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_set_timer_op),
+        "b" (timeout_hi), "c" (timeout_lo) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_dom0_op(dom0_op_t *dom0_op)
+{
+    int ret;
+    dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_dom0_op),
+        "b" (dom0_op) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_set_debugreg(int reg, unsigned long value)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_set_debugreg),
+        "b" (reg), "c" (value) : "memory" );
+
+    return ret;
+}
+
+static inline unsigned long HYPERVISOR_get_debugreg(int reg)
+{
+    unsigned long ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_get_debugreg),
+        "b" (reg) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_update_descriptor(
+    unsigned long ma, unsigned long word1, unsigned long word2)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_update_descriptor), 
+        "b" (ma), "c" (word1), "d" (word2) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_set_fast_trap(int idx)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_set_fast_trap), 
+        "b" (idx) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_dom_mem_op(unsigned int   op,
+                                        unsigned long *pages,
+                                        unsigned long  nr_pages)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
+        "b" (op), "c" (pages), "d" (nr_pages) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_multicall(void *call_list, int nr_calls)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_multicall),
+        "b" (call_list), "c" (nr_calls) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_update_va_mapping(
+    unsigned long page_nr, pte_t new_val, unsigned long flags)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping), 
+        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags) : "memory" );
+
+    if ( unlikely(ret < 0) )
+        panic("Failed update VA mapping: %08lx, %08lx, %08lx",
+              page_nr, (new_val).pte_low, flags);
+    
+    return ret;
+}
+
+static inline int HYPERVISOR_event_channel_op(void *op)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_event_channel_op),
+        "b" (op) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_xen_version(int cmd)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_xen_version), 
+        "b" (cmd) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_console_io(int cmd, int count, char *str)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_console_io),
+        "b" (cmd), "c" (count), "d" (str) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_physdev_op(void *physdev_op)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_physdev_op),
+        "b" (physdev_op) : "memory" );
+
+    return ret;
+}
+
+static inline int HYPERVISOR_update_va_mapping_otherdomain(
+    unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping_otherdomain), 
+        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags), "S" (domid) :
+        "memory" );
+    
+    return ret;
+}
+
+#endif /* __HYPERVISOR_H__ */
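
netif_poll() in the netfront driver above shows the intended use of HYPERVISOR_multicall(): several update_va_mapping operations plus one trailing mmu_update issued in a single trap. A reduced sketch of building such a batch follows; argument meanings track the stubs in this header, and error handling is omitted.

    /* Sketch: one va remap plus queued M2P updates in a single multicall. */
    static void batch_remap(unsigned long va_pfn, unsigned long new_pte,
                            mmu_update_t *mmu, int mmu_count)
    {
        multicall_entry_t mcl[2];

        mcl[0].op      = __HYPERVISOR_update_va_mapping;
        mcl[0].args[0] = va_pfn;               /* virtual page number      */
        mcl[0].args[1] = new_pte;              /* new (machine) pte value  */
        mcl[0].args[2] = 0;                    /* no flush flags           */

        mcl[1].op      = __HYPERVISOR_mmu_update;
        mcl[1].args[0] = (unsigned long)mmu;   /* request array            */
        mcl[1].args[1] = mmu_count;            /* number of requests       */
        mcl[1].args[2] = 0;                    /* success count: discarded */

        (void)HYPERVISOR_multicall(mcl, 2);
    }
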
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h
new file mode 100644 (file)
index 0000000..d76e3e0
--- /dev/null
@@ -0,0 +1,367 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ *             Linus
+ */
+
+ /*
+  *  Bit simplified and optimized by Jan Hubicka
+  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+  *
+  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+  *  isa_read[wl] and isa_write[wl] fixed
+  *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+  */
+
+#define IO_SPACE_LIMIT 0xffff
+
+#define XQUAD_PORTIO_BASE 0xfe400000
+#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
+
+#ifdef __KERNEL__
+
+#include <linux/vmalloc.h>
+
+/**
+ *     virt_to_phys    -       map virtual addresses to physical
+ *     @address: address to remap
+ *
+ *     The returned physical address is the physical (CPU) mapping for
+ *     the memory address given. It is only valid to use this function on
+ *     addresses directly mapped or allocated via kmalloc. 
+ *
+ *     This function does not give bus mappings for DMA transfers. In
+ *     almost all conceivable cases a device driver should not be using
+ *     this function
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+       return __pa(address);
+}
+
+/**
+ *     phys_to_virt    -       map physical address to virtual
+ *     @address: address to remap
+ *
+ *     The returned virtual address is a current CPU mapping for
+ *     the memory address given. It is only valid to use this function on
+ *     addresses that have a kernel mapping
+ *
+ *     This function does not handle bus mappings for DMA transfers. In
+ *     almost all conceivable cases a device driver should not be using
+ *     this function
+ */
+
+static inline void * phys_to_virt(unsigned long address)
+{
+       return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/**
+ * ioremap     -   map bus memory into CPU space
+ * @offset:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address. 
+ */
+
+static inline void * ioremap (unsigned long offset, unsigned long size)
+{
+       return __ioremap(offset, size, 0);
+}
+
+extern void * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void iounmap(void *addr);
+
+/*
+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ * A boot-time mapping is currently limited to at most 16 pages.
+ */
+extern void *bt_ioremap(unsigned long offset, unsigned long size);
+extern void bt_iounmap(void *addr, unsigned long size);
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
+#define isa_page_to_bus(_x) phys_to_machine(page_to_phys(_x))
+#define isa_bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
+#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#define readl(addr) (*(volatile unsigned int *) (addr))
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+
+#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
+#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
+#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+#define memset_io(a,b,c)       memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c)   __memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c)     __memcpy((void *)(a),(b),(c))
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c)           memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c)       memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c)         memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, i386 does not require mem IO specific function.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d)           eth_copy_and_sum((a),(void *)(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d)       eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ *     check_signature         -       find BIOS signatures
+ *     @io_addr: mmio address to check 
+ *     @signature:  signature block
+ *     @length: length of signature
+ *
+ *     Perform a signature comparison with the mmio address io_addr. This
+ *     address should have been obtained by ioremap.
+ *     Returns 1 on a match.
+ */
+static inline int check_signature(unsigned long io_addr,
+       const unsigned char *signature, int length)
+{
+       int retval = 0;
+       do {
+               if (readb(io_addr) != *signature)
+                       goto out;
+               io_addr++;
+               signature++;
+               length--;
+       } while (length);
+       retval = 1;
+out:
+       return retval;
+}
+
+/**
+ *     isa_check_signature             -       find BIOS signatures
+ *     @io_addr: mmio address to check 
+ *     @signature:  signature block
+ *     @length: length of signature
+ *
+ *     Perform a signature comparison with the ISA mmio address io_addr.
+ *     Returns 1 on a match.
+ *
+ *     This function is deprecated. New drivers should use ioremap and
+ *     check_signature.
+ */
+
+static inline int isa_check_signature(unsigned long io_addr,
+       const unsigned char *signature, int length)
+{
+       int retval = 0;
+       do {
+               if (isa_readb(io_addr) != *signature)
+                       goto out;
+               io_addr++;
+               signature++;
+               length--;
+       } while (length);
+       retval = 1;
+out:
+       return retval;
+}
+
+/*
+ *     Cache management
+ *
+ *     This needed for two cases
+ *     1. Out of order aware processors
+ *     2. Accidentally out of order processors (PPro errata #51)
+ */
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+static inline void flush_write_buffers(void)
+{
+       __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+}
+
+#define dma_cache_inv(_start,_size)            flush_write_buffers()
+#define dma_cache_wback(_start,_size)          flush_write_buffers()
+#define dma_cache_wback_inv(_start,_size)      flush_write_buffers()
+
+#else
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size)            do { } while (0)
+#define dma_cache_wback(_start,_size)          do { } while (0)
+#define dma_cache_wback_inv(_start,_size)      do { } while (0)
+#define flush_write_buffers()
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#else
+#define __SLOW_DOWN_IO "outb %%al,$0x80;"
+#endif
+
+static inline void slow_down_io(void) {
+       __asm__ __volatile__(
+               __SLOW_DOWN_IO
+#ifdef REALLY_SLOW_IO
+               __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+               : : );
+}
+
+#ifdef CONFIG_X86_NUMAQ
+extern void *xquad_portio;    /* Where the IO area was mapped */
+#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
+       if (xquad_portio) \
+               write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
+       else \
+               out##bwl##_local(value, port); \
+} \
+static inline void out##bwl(unsigned type value, int port) { \
+       out##bwl##_quad(value, port, 0); \
+} \
+static inline unsigned type in##bwl##_quad(int port, int quad) { \
+       if (xquad_portio) \
+               return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
+       else \
+               return in##bwl##_local(port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+       return in##bwl##_quad(port, 0); \
+}
+#else
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl(unsigned type value, int port) { \
+       out##bwl##_local(value, port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+       return in##bwl##_local(port); \
+}
+#endif
+
+
+#define BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local(unsigned type value, int port) { \
+       __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
+} \
+static inline unsigned type in##bwl##_local(int port) { \
+       unsigned type value; \
+       __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
+       return value; \
+} \
+static inline void out##bwl##_local_p(unsigned type value, int port) { \
+       out##bwl##_local(value, port); \
+       slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_local_p(int port) { \
+       unsigned type value = in##bwl##_local(port); \
+       slow_down_io(); \
+       return value; \
+} \
+__BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_p(unsigned type value, int port) { \
+       out##bwl(value, port); \
+       slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_p(int port) { \
+       unsigned type value = in##bwl(port); \
+       slow_down_io(); \
+       return value; \
+} \
+static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
+       __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
+} \
+static inline void ins##bwl(int port, void *addr, unsigned long count) { \
+       __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
+}
+
+BUILDIO(b,b,char)
+BUILDIO(w,w,short)
+BUILDIO(l,,int)
+
+#endif
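
The BUILDIO/__BUILDIO macro pair is dense; for the non-NUMAQ case, BUILDIO(b,b,char) generates (among others) exactly these two inlines, written out here by hand for readability:

    /* Hand expansion of the byte-sized accessors from BUILDIO(b,b,char). */
    static inline void outb_expanded(unsigned char value, int port)
    {
        __asm__ __volatile__("outb %b0, %w1" : : "a"(value), "Nd"(port));
    }

    static inline unsigned char inb_expanded(int port)
    {
        unsigned char value;
        __asm__ __volatile__("inb %w1, %b0" : "=a"(value) : "Nd"(port));
        return value;
    }
    /* The _p variants are the same bodies followed by slow_down_io(). */
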
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h
new file mode 100644 (file)
index 0000000..8ac7972
--- /dev/null
@@ -0,0 +1,82 @@
+/* defines for inline arch setup functions */
+
+#include <asm/apic.h>
+
+/**
+ * do_timer_interrupt_hook - hook into timer tick
+ * @regs:      standard registers from interrupt
+ *
+ * Description:
+ *     This hook is called immediately after the timer interrupt is ack'd.
+ *     It's primary purpose is to allow architectures that don't possess
+ *     individual per CPU clocks (like the CPU APICs supply) to broadcast the
+ *     timer interrupt as a means of triggering reschedules etc.
+ **/
+
+static inline void do_timer_interrupt_hook(struct pt_regs *regs)
+{
+       do_timer(regs);
+/*
+ * In the SMP case we use the local APIC timer interrupt to do the
+ * profiling, except when we simulate SMP mode on a uniprocessor
+ * system, in that case we have to call the local interrupt handler.
+ */
+#ifndef CONFIG_X86_LOCAL_APIC
+       x86_do_profile(regs);
+#else
+       if (!using_apic_timer)
+               smp_local_timer_interrupt(regs);
+#endif
+}
+
+
+/* you can safely undefine this if you don't have the Neptune chipset */
+
+#define BUGGY_NEPTUN_TIMER
+
+/**
+ * do_timer_overflow - process a detected timer overflow condition
+ * @count:     hardware timer interrupt count on overflow
+ *
+ * Description:
+ *     This call is invoked when the jiffies count has not incremented but
+ *     the hardware timer interrupt has.  It means that a timer tick interrupt
+ *     came along while the previous one was pending, thus a tick was missed
+ **/
+static inline int do_timer_overflow(int count)
+{
+       int i;
+
+       spin_lock(&i8259A_lock);
+       /*
+        * This is tricky when I/O APICs are used;
+        * see do_timer_interrupt().
+        */
+       i = inb(0x20);
+       spin_unlock(&i8259A_lock);
+       
+       /* assumption about timer being IRQ0 */
+       if (i & 0x01) {
+               /*
+                * We cannot detect lost timer interrupts ... 
+                * well, that's why we call them lost, don't we? :)
+                * [hmm, on the Pentium and Alpha we can ... sort of]
+                */
+               count -= LATCH;
+       } else {
+#ifdef BUGGY_NEPTUN_TIMER
+               /*
+                * for the Neptune bug we know that the 'latch'
+                * command doesn't latch the high and low value
+                * of the counter atomically. Thus we have to
+                * subtract 256 from the counter
+                * ... funny, isn't it? :)
+                */
+
+               count -= 256;
+#else
+               printk("do_slow_gettimeoffset(): hardware timer problem?\n");
+#endif
+       }
+       return count;
+}
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h
new file mode 100644 (file)
index 0000000..a96d9f6
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ *  arch/i386/mach-generic/io_ports.h
+ *
+ *  Machine specific IO port address definition for generic.
+ *  Written by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_IO_PORTS_H
+#define _MACH_IO_PORTS_H
+
+/* i8253A PIT registers */
+#define PIT_MODE               0x43
+#define PIT_CH0                        0x40
+#define PIT_CH2                        0x42
+
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD         0x20
+#define PIC_MASTER_IMR         0x21
+#define PIC_MASTER_ISR         PIC_MASTER_CMD
+#define PIC_MASTER_POLL                PIC_MASTER_ISR
+#define PIC_MASTER_OCW3                PIC_MASTER_ISR
+#define PIC_SLAVE_CMD          0xa0
+#define PIC_SLAVE_IMR          0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR         2
+#define MASTER_ICW4_DEFAULT    0x01
+#define SLAVE_ICW4_DEFAULT     0x01
+#define PIC_ICW4_AEOI          2
+
+#endif /* !_MACH_IO_PORTS_H */
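
These constants serve the PIT read path that do_timer_overflow() in the previous file worries about: a latch command freezes the channel-0 count, then two successive reads return its low and high bytes. A sketch of that sequence, assuming the caller already holds the i8253 lock (the function name is illustrative):

    /* Sketch: latch and read the channel-0 count of the i8253 PIT. */
    static inline int pit_read_count(void)
    {
        int count;

        outb_p(0x00, PIT_MODE);          /* latch command for channel 0 */
        count  = inb_p(PIT_CH0);         /* low byte of the count       */
        count |= inb_p(PIT_CH0) << 8;    /* high byte of the count      */

        return count;
    }
    /* On a buggy Neptune chipset the latch is not atomic across the two
     * reads, which is why do_timer_overflow() compensates by 256. */
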
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
new file mode 100644 (file)
index 0000000..8de8846
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * This file should contain #defines for all of the interrupt vector
+ * numbers used by this architecture.
+ *
+ * In addition, there are some standard defines:
+ *
+ *     FIRST_EXTERNAL_VECTOR:
+ *             The first free place for external interrupts
+ *
+ *     SYSCALL_VECTOR:
+ *             The IRQ vector a syscall makes the user to kernel transition
+ *             under.
+ *
+ *     TIMER_IRQ:
+ *             The IRQ number the timer interrupt comes in at.
+ *
+ *     NR_IRQS:
+ *             The total number of interrupt vectors (including all the
+ *             architecture specific interrupts) needed.
+ *
+ */                    
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR  0x20
+
+#define SYSCALL_VECTOR         0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ *  some of the following vectors are 'rare', they are merged
+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ *  TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#define SPURIOUS_APIC_VECTOR   0xff
+#define ERROR_APIC_VECTOR      0xfe
+#define INVALIDATE_TLB_VECTOR  0xfd
+#define RESCHEDULE_VECTOR      0xfc
+#define CALL_FUNCTION_VECTOR   0xfb
+
+#define THERMAL_APIC_VECTOR    0xf0
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR     0xef
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+#define FIRST_DEVICE_VECTOR    0x31
+#define FIRST_SYSTEM_VECTOR    0xef
+
+/*  #define TIMER_IRQ _EVENT_TIMER */
+
+/*
+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
+#if 0
+/*
+ * The maximum number of vectors supported by i386 processors
+ * is limited to 256. For processors other than i386, NR_VECTORS
+ * should be changed accordingly.
+ */
+#define NR_VECTORS 256
+
+#ifdef CONFIG_PCI_USE_VECTOR
+#define NR_IRQS FIRST_SYSTEM_VECTOR
+#define NR_IRQ_VECTORS NR_IRQS
+#else
+#ifdef CONFIG_X86_IO_APIC
+#define NR_IRQS 224
+# if (224 >= 32 * NR_CPUS)
+# define NR_IRQ_VECTORS NR_IRQS
+# else
+# define NR_IRQ_VECTORS (32 * NR_CPUS)
+# endif
+#else
+#define NR_IRQS 16
+#define NR_IRQ_VECTORS NR_IRQS
+#endif
+#endif
+#endif
+
+#define FPU_IRQ                        13
+
+#define        FIRST_VM86_IRQ          3
+#define LAST_VM86_IRQ          15
+#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
+
+/*
+ * The flat IRQ space is divided into two regions:
+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
+ *     if we have physical device-access privilege. This region is at the 
+ *     start of the IRQ space so that existing device drivers do not need
+ *     to be modified to translate physical IRQ numbers into our IRQ space.
+ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ *     are bound using the provided bind/unbind functions.
+ */
+
+#define PIRQ_BASE   0
+#define NR_PIRQS  128
+
+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS  128
+
+#define NR_IRQS   (NR_PIRQS + NR_DYNIRQS)
+#define NR_IRQ_VECTORS NR_IRQS
+
+#define pirq_to_irq(_x)   ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x)   ((_x) - PIRQ_BASE)
+
+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
+
+#ifndef __ASSEMBLY__
+/* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
+extern int  bind_virq_to_irq(int virq);
+extern void unbind_virq_from_irq(int virq);
+extern int  bind_evtchn_to_irq(int evtchn);
+extern void unbind_evtchn_from_irq(int evtchn);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IRQ_VECTORS_H */
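How the dynamic region is used in practice, as a hedged sketch (the
example_ function is invented here; bind_virq_to_irq(), request_irq(),
SA_INTERRUPT and VIRQ_TIMER all exist elsewhere in this tree): a virtual
IRQ is first bound into the DYNIRQ region, and the returned number is then
an ordinary Linux IRQ.

	static int example_attach_timer_virq(irqreturn_t (*handler)
					     (int, void *, struct pt_regs *))
	{
		int irq = bind_virq_to_irq(VIRQ_TIMER); /* lands in DYNIRQ region */

		if (irq < 0)
			return irq;
		return request_irq(irq, handler, SA_INTERRUPT, "timer_virq", NULL);
	}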
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_mpspec.h
new file mode 100644 (file)
index 0000000..cf0b2f6
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __ASM_MACH_MPSPEC_H
+#define __ASM_MACH_MPSPEC_H
+
+/*
+ * a maximum of 16 APICs with the current APIC ID architecture.
+ */
+#define MAX_APICS 16
+
+#define MAX_IRQ_SOURCES 256
+
+#define MAX_MP_BUSSES 32
+
+#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_reboot.h
new file mode 100644 (file)
index 0000000..521e227
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ *  arch/i386/mach-generic/mach_reboot.h
+ *
+ *  Machine specific reboot functions for generic.
+ *  Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_REBOOT_H
+#define _MACH_REBOOT_H
+
+static inline void kb_wait(void)
+{
+       int i;
+
+       for (i = 0; i < 0x10000; i++)
+               if ((inb_p(0x64) & 0x02) == 0)
+                       break;
+}
+
+static inline void mach_reboot(void)
+{
+       int i;
+       for (i = 0; i < 100; i++) {
+               kb_wait();
+               udelay(50);
+               outb(0xfe, 0x64);         /* pulse reset low */
+               udelay(50);
+       }
+}
+
+#endif /* !_MACH_REBOOT_H */
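The same 8042 handshake spelled out with named constants (a sketch for
clarity; the KBC_ names are invented here): kb_wait() spins while bit 1 of
status port 0x64 (input buffer full) says the controller cannot accept
another byte, and command 0xfe then pulses the CPU reset line.

	#define KBC_STATUS_PORT 0x64	/* read: 8042 status; write: command */
	#define KBC_STAT_IBF    0x02	/* input buffer full: controller busy */
	#define KBC_CMD_RESET   0xfe	/* pulse the CPU reset line low */

	static void example_kbc_reset_once(void)
	{
		while (inb_p(KBC_STATUS_PORT) & KBC_STAT_IBF)
			;	/* wait until the 8042 can take a byte */
		outb(KBC_CMD_RESET, KBC_STATUS_PORT);
	}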
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_resources.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_resources.h
new file mode 100644 (file)
index 0000000..b37858d
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ *  include/asm-i386/mach-default/mach_resources.h
+ *
+ *  Machine specific resource allocation for generic.
+ *  Split out from setup.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_RESOURCES_H
+#define _MACH_RESOURCES_H
+
+struct resource standard_io_resources[] = {
+       { "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
+       { "pic1", 0x20, 0x21, IORESOURCE_BUSY },
+       { "timer", 0x40, 0x5f, IORESOURCE_BUSY },
+       { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
+       { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
+       { "pic2", 0xa0, 0xa1, IORESOURCE_BUSY },
+       { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
+       { "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
+};
+
+#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
+
+static struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };
+
+/* System ROM resources */
+#define MAXROMS 6
+static struct resource rom_resources[MAXROMS] = {
+       { "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
+       { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
+};
+
+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+static inline void probe_video_rom(int roms)
+{
+       unsigned long base;
+       unsigned char *romstart;
+
+       /* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
+       for (base = 0xC0000; base < 0xE0000; base += 2048) {
+               romstart = isa_bus_to_virt(base);
+               if (!romsignature(romstart))
+                       continue;
+               request_resource(&iomem_resource, rom_resources + roms);
+               roms++;
+               break;
+       }
+}
+
+static inline void probe_extension_roms(int roms)
+{
+       unsigned long base;
+       unsigned char *romstart;
+
+       /* Extension roms at C800:0000 - DFFF:0000 */
+       for (base = 0xC8000; base < 0xE0000; base += 2048) {
+               unsigned long length;
+
+               romstart = isa_bus_to_virt(base);
+               if (!romsignature(romstart))
+                       continue;
+               length = romstart[2] * 512;
+               if (length) {
+                       unsigned int i;
+                       unsigned char chksum;
+
+                       chksum = 0;
+                       for (i = 0; i < length; i++)
+                               chksum += romstart[i];
+
+                       /* Good checksum? */
+                       if (!chksum) {
+                               rom_resources[roms].start = base;
+                               rom_resources[roms].end = base + length - 1;
+                               rom_resources[roms].name = "Extension ROM";
+                               rom_resources[roms].flags = IORESOURCE_BUSY;
+
+                               request_resource(&iomem_resource, rom_resources + roms);
+                               roms++;
+                               if (roms >= MAXROMS)
+                                       return;
+                       }
+               }
+       }
+
+       /* Final check for motherboard extension rom at E000:0000 */
+       base = 0xE0000;
+       romstart = isa_bus_to_virt(base);
+
+       if (romsignature(romstart)) {
+               rom_resources[roms].start = base;
+               rom_resources[roms].end = base + 65535;
+               rom_resources[roms].name = "Extension ROM";
+               rom_resources[roms].flags = IORESOURCE_BUSY;
+
+               request_resource(&iomem_resource, rom_resources + roms);
+       }
+}
+
+static inline void request_graphics_resource(void)
+{
+       request_resource(&iomem_resource, &vram_resource);
+}
+
+#endif /* !_MACH_RESOURCES_H */
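The extension-ROM probe above boils down to three tests: a 0xaa55 signature
word, a length byte at offset 2 counted in 512-byte units, and all bytes of
the image summing to zero mod 256. As a self-contained sketch (illustrative;
example_rom_valid() is not in the tree):

	static int example_rom_valid(const unsigned char *rom)
	{
		unsigned long len = rom[2] * 512;	/* 512-byte units */
		unsigned char sum = 0;
		unsigned long i;

		if (*(const unsigned short *)rom != 0xaa55)	/* 55 AA on disk */
			return 0;
		for (i = 0; i < len; i++)
			sum += rom[i];
		return len != 0 && sum == 0;	/* checksum byte balances the sum */
	}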
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_time.h
new file mode 100644 (file)
index 0000000..b749aa4
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ *  include/asm-i386/mach-default/mach_time.h
+ *
+ *  Machine specific set RTC function for generic.
+ *  Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_TIME_H
+#define _MACH_TIME_H
+
+#include <linux/mc146818rtc.h>
+
+/* usec window around the half-second point for calling set_rtc_mmss() */
+/* used in arch/i386/time.c::do_timer_interrupt() */
+#define USEC_AFTER     500000
+#define USEC_BEFORE    500000
+
+/*
+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
+ * called 500 ms after the second nowtime has started, because when
+ * nowtime is written into the registers of the CMOS clock, it will
+ * jump to the next second precisely 500 ms later. Check the Motorola
+ * MC146818A or Dallas DS12887 data sheet for details.
+ *
+ * BUG: This routine does not handle hour overflow properly; it just
+ *      sets the minutes. Usually you'll only notice that after reboot!
+ */
+static inline int mach_set_rtc_mmss(unsigned long nowtime)
+{
+       int retval = 0;
+       int real_seconds, real_minutes, cmos_minutes;
+       unsigned char save_control, save_freq_select;
+
+       save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+       CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+       save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+       CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+       cmos_minutes = CMOS_READ(RTC_MINUTES);
+       if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+               BCD_TO_BIN(cmos_minutes);
+
+       /*
+        * since we're only adjusting minutes and seconds,
+        * don't interfere with hour overflow. This avoids
+        * messing with unknown time zones but requires your
+        * RTC not to be off by more than 15 minutes
+        */
+       real_seconds = nowtime % 60;
+       real_minutes = nowtime / 60;
+       if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+               real_minutes += 30;             /* correct for half hour time zone */
+       real_minutes %= 60;
+
+       if (abs(real_minutes - cmos_minutes) < 30) {
+               if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+                       BIN_TO_BCD(real_seconds);
+                       BIN_TO_BCD(real_minutes);
+               }
+               CMOS_WRITE(real_seconds,RTC_SECONDS);
+               CMOS_WRITE(real_minutes,RTC_MINUTES);
+       } else {
+               printk(KERN_WARNING
+                      "set_rtc_mmss: can't update from %d to %d\n",
+                      cmos_minutes, real_minutes);
+               retval = -1;
+       }
+
+       /* The following flags have to be released exactly in this order,
+        * otherwise the DS12887 (popular MC146818A clone with integrated
+        * battery and quartz) will not reset the oscillator and will not
+        * update precisely 500 ms later. You won't find this mentioned in
+        * the Dallas Semiconductor data sheets, but who believes data
+        * sheets anyway ...                           -- Markus Kuhn
+        */
+       CMOS_WRITE(save_control, RTC_CONTROL);
+       CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+       return retval;
+}
+
+static inline unsigned long mach_get_cmos_time(void)
+{
+       unsigned int year, mon, day, hour, min, sec;
+       int i;
+
+       /* The Linux interpretation of the CMOS clock register contents:
+        * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
+        * RTC registers show the second which has precisely just started.
+        * Let's hope other operating systems interpret the RTC the same way.
+        */
+       /* read RTC exactly on falling edge of update flag */
+       for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
+               if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+                       break;
+       for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
+               if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+                       break;
+       do { /* Isn't this overkill ? UIP above should guarantee consistency */
+               sec = CMOS_READ(RTC_SECONDS);
+               min = CMOS_READ(RTC_MINUTES);
+               hour = CMOS_READ(RTC_HOURS);
+               day = CMOS_READ(RTC_DAY_OF_MONTH);
+               mon = CMOS_READ(RTC_MONTH);
+               year = CMOS_READ(RTC_YEAR);
+       } while (sec != CMOS_READ(RTC_SECONDS));
+       if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+               BCD_TO_BIN(sec);
+               BCD_TO_BIN(min);
+               BCD_TO_BIN(hour);
+               BCD_TO_BIN(day);
+               BCD_TO_BIN(mon);
+               BCD_TO_BIN(year);
+       }
+       if ((year += 1900) < 1970)
+               year += 100;
+
+       return mktime(year, mon, day, hour, min, sec);
+}
+
+#endif /* !_MACH_TIME_H */
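A worked example of the half-hour correction above (illustrative numbers):
suppose nowtime lands on minute 50 while the CMOS clock, kept in a zone
offset by half an hour, reads minute 20. Then abs(50 - 20) = 30, and
(30 + 15)/30 = 1 is odd, so real_minutes becomes (50 + 30) % 60 = 20: the
delta collapses to zero and the seconds/minutes are written. Without the
correction, the raw delta of 30 would fail the < 30 guard and the update
would be skipped with a warning.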
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_timer.h
new file mode 100644 (file)
index 0000000..4b9703b
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ *  include/asm-i386/mach-default/mach_timer.h
+ *
+ *  Machine specific calibrate_tsc() for generic.
+ *  Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+/* ------ Calibrate the TSC ------- 
+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
+ * output busy loop as low as possible. We avoid reading the CTC registers
+ * directly because of the awkward 8-bit access mechanism of the 82C54
+ * device.
+ */
+#ifndef _MACH_TIMER_H
+#define _MACH_TIMER_H
+
+#define CALIBRATE_LATCH        (5 * LATCH)
+
+static inline void mach_prepare_counter(void)
+{
+       /* Set the Gate high, disable speaker */
+       outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+       /*
+        * Now let's take care of CTC channel 2
+        *
+        * Set the Gate high, program CTC channel 2 for mode 0,
+        * (interrupt on terminal count mode), binary count,
+        * load 5 * LATCH count, (LSB and MSB) to begin countdown.
+        *
+        * Some devices need a delay here.
+        */
+       outb(0xb0, 0x43);                       /* binary, mode 0, LSB/MSB, Ch 2 */
+       outb_p(CALIBRATE_LATCH & 0xff, 0x42);   /* LSB of count */
+       outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
+}
+
+static inline void mach_countup(unsigned long *count_p)
+{
+       unsigned long count = 0;
+       do {
+               count++;
+       } while ((inb_p(0x61) & 0x20) == 0);
+       *count_p = count;
+}
+
+#endif /* !_MACH_TIMER_H */
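How the two helpers combine, modeled on calibrate_tsc() in timer_tsc.c (a
sketch; CALIBRATE_TIME is defined here for illustration as the number of
microseconds the PIT needs to count down CALIBRATE_LATCH):

	#define CALIBRATE_TIME (5 * 1000020/HZ)	/* usec for 5*LATCH ticks */

	static unsigned long long example_calibrate_tsc_delta(void)
	{
		unsigned long long start, end;
		unsigned long count;

		mach_prepare_counter();	/* gate high, load 5*LATCH into ch 2 */
		rdtscll(start);
		mach_countup(&count);	/* spin on port 0x61 bit 5 (OUT2) */
		rdtscll(end);

		/* (end - start) TSC ticks elapsed in CALIBRATE_TIME usec;
		   calibrate_tsc() derives 2^32 / (ticks per usec) from it. */
		return end - start;
	}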
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/mach_traps.h
new file mode 100644 (file)
index 0000000..c98c288
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ *  include/asm-i386/mach-default/mach_traps.h
+ *
+ *  Machine specific NMI handling for generic.
+ *  Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _MACH_TRAPS_H
+#define _MACH_TRAPS_H
+
+static inline void clear_mem_error(unsigned char reason)
+{
+       reason = (reason & 0xf) | 4;
+       outb(reason, 0x61);
+}
+
+static inline unsigned char get_nmi_reason(void)
+{
+       return inb(0x61);
+}
+
+static inline void reassert_nmi(void)
+{
+       outb(0x8f, 0x70);
+       inb(0x71);              /* dummy */
+       outb(0x0f, 0x70);
+       inb(0x71);              /* dummy */
+}
+
+#endif /* !_MACH_TRAPS_H */
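The reason byte is system control port B (0x61); its top two bits carry the
standard ISA error sources. A decoding sketch in the style of traps.c
(illustrative; the example_ name is invented):

	static const char *example_nmi_reason_string(unsigned char reason)
	{
		if (reason & 0x80)	/* bit 7: memory parity error */
			return "memory parity error";	/* then clear_mem_error() */
		if (reason & 0x40)	/* bit 6: ISA IOCHK asserted */
			return "I/O check error";
		return "unknown NMI";
	}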
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
new file mode 100644 (file)
index 0000000..012932a
--- /dev/null
@@ -0,0 +1,41 @@
+/**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ *
+ * Description:
+ *     This is included late in kernel/setup.c so that it can make
+ *     use of all of the static functions.
+ **/
+
+static inline char * __init machine_specific_memory_setup(void)
+{
+       char *who;
+       unsigned long start_pfn, max_pfn;
+
+       who = "Xen";
+
+       start_pfn = 0;
+       max_pfn = start_info.nr_pages;
+
+       e820.nr_map = 0;
+       add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
+
+       return who;
+}
+
+extern void hypervisor_callback(void);
+extern void failsafe_callback(void);
+
+static inline void __init machine_specific_arch_setup(void)
+{
+       HYPERVISOR_set_callbacks(
+           __KERNEL_CS, (unsigned long)hypervisor_callback,
+           __KERNEL_CS, (unsigned long)failsafe_callback);
+
+       clear_bit(X86_FEATURE_VME, boot_cpu_data.x86_capability);
+       clear_bit(X86_FEATURE_DE, boot_cpu_data.x86_capability);
+       clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+       clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+       clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
+       clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
+       clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
+}
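Illustrative arithmetic for the memory-map hook: with start_info.nr_pages =
0x10000, machine_specific_memory_setup() installs exactly one E820_RAM entry
spanning PFN_PHYS(0) to PFN_PHYS(0x10000), i.e. bytes 0 through 0x10000000
(256 MB). Unlike a BIOS-supplied e820 map, the Xen pseudo-physical map is a
single flat region with no holes or reserved ranges.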
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
new file mode 100644 (file)
index 0000000..957ed99
--- /dev/null
@@ -0,0 +1,5 @@
+/* Hook to call BIOS initialisation function */
+
+#define ARCH_SETUP machine_specific_arch_setup();
+
+static inline void __init machine_specific_arch_setup(void);
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/msr.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/msr.h
new file mode 100644 (file)
index 0000000..574a356
--- /dev/null
@@ -0,0 +1,263 @@
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
+
+#include <linux/smp.h>
+#include <asm/hypervisor.h>
+
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(_msr,_val1,_val2) do { \
+       dom0_op_t op; \
+       op.cmd = DOM0_MSR; \
+       op.u.msr.write = 0; \
+       op.u.msr.msr = (_msr); \
+       op.u.msr.cpu_mask = (1 << smp_processor_id()); \
+       HYPERVISOR_dom0_op(&op); \
+       (_val1) = op.u.msr.out1; \
+       (_val2) = op.u.msr.out2; \
+} while(0)
+
+#define wrmsr(_msr,_val1,_val2) do { \
+       dom0_op_t op; \
+       op.cmd = DOM0_MSR; \
+       op.u.msr.write = 1; \
+       op.u.msr.cpu_mask = (1 << smp_processor_id()); \
+       op.u.msr.msr = (_msr); \
+       op.u.msr.in1 = (_val1); \
+       op.u.msr.in2 = (_val2); \
+       HYPERVISOR_dom0_op(&op); \
+} while(0)
+
+#define rdmsrl(msr,val) do { \
+       unsigned long l__,h__; \
+       rdmsr (msr, l__, h__);  \
+       val = l__;  \
+       val |= ((u64)h__<<32);  \
+} while(0)
+
+static inline void wrmsrl (unsigned long msr, unsigned long long val)
+{
+       unsigned long lo, hi;
+       lo = (unsigned long) val;
+       hi = val >> 32;
+       wrmsr (msr, lo, hi);
+}
+
+#define rdtsc(low,high) \
+     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+     __asm__ __volatile__("rdtsc" : "=A" (val))
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+     __asm__ __volatile__("rdpmc" \
+                         : "=a" (low), "=d" (high) \
+                         : "c" (counter))
+
+/* symbolic names for some interesting MSRs */
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR            0
+#define MSR_IA32_P5_MC_TYPE            1
+#define MSR_IA32_PLATFORM_ID           0x17
+#define MSR_IA32_EBL_CR_POWERON                0x2a
+
+#define MSR_IA32_APICBASE              0x1b
+#define MSR_IA32_APICBASE_BSP          (1<<8)
+#define MSR_IA32_APICBASE_ENABLE       (1<<11)
+#define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE           0x79
+#define MSR_IA32_UCODE_REV             0x8b
+
+#define MSR_P6_PERFCTR0                0xc1
+#define MSR_P6_PERFCTR1                0xc2
+
+#define MSR_IA32_BBL_CR_CTL            0x119
+
+#define MSR_IA32_SYSENTER_CS           0x174
+#define MSR_IA32_SYSENTER_ESP          0x175
+#define MSR_IA32_SYSENTER_EIP          0x176
+
+#define MSR_IA32_MCG_CAP               0x179
+#define MSR_IA32_MCG_STATUS            0x17a
+#define MSR_IA32_MCG_CTL               0x17b
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX               0x180
+#define MSR_IA32_MCG_EBX               0x181
+#define MSR_IA32_MCG_ECX               0x182
+#define MSR_IA32_MCG_EDX               0x183
+#define MSR_IA32_MCG_ESI               0x184
+#define MSR_IA32_MCG_EDI               0x185
+#define MSR_IA32_MCG_EBP               0x186
+#define MSR_IA32_MCG_ESP               0x187
+#define MSR_IA32_MCG_EFLAGS            0x188
+#define MSR_IA32_MCG_EIP               0x189
+#define MSR_IA32_MCG_RESERVED          0x18A
+
+#define MSR_P6_EVNTSEL0                        0x186
+#define MSR_P6_EVNTSEL1                        0x187
+
+#define MSR_IA32_PERF_STATUS           0x198
+#define MSR_IA32_PERF_CTL              0x199
+
+#define MSR_IA32_THERM_CONTROL         0x19a
+#define MSR_IA32_THERM_INTERRUPT       0x19b
+#define MSR_IA32_THERM_STATUS          0x19c
+#define MSR_IA32_MISC_ENABLE           0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR           0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP      0x1db
+#define MSR_IA32_LASTBRANCHTOIP                0x1dc
+#define MSR_IA32_LASTINTFROMIP         0x1dd
+#define MSR_IA32_LASTINTTOIP           0x1de
+
+#define MSR_IA32_MC0_CTL               0x400
+#define MSR_IA32_MC0_STATUS            0x401
+#define MSR_IA32_MC0_ADDR              0x402
+#define MSR_IA32_MC0_MISC              0x403
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0            0x300
+#define MSR_P4_BPU_PERFCTR1            0x301
+#define MSR_P4_BPU_PERFCTR2            0x302
+#define MSR_P4_BPU_PERFCTR3            0x303
+#define MSR_P4_MS_PERFCTR0             0x304
+#define MSR_P4_MS_PERFCTR1             0x305
+#define MSR_P4_MS_PERFCTR2             0x306
+#define MSR_P4_MS_PERFCTR3             0x307
+#define MSR_P4_FLAME_PERFCTR0          0x308
+#define MSR_P4_FLAME_PERFCTR1          0x309
+#define MSR_P4_FLAME_PERFCTR2          0x30a
+#define MSR_P4_FLAME_PERFCTR3          0x30b
+#define MSR_P4_IQ_PERFCTR0             0x30c
+#define MSR_P4_IQ_PERFCTR1             0x30d
+#define MSR_P4_IQ_PERFCTR2             0x30e
+#define MSR_P4_IQ_PERFCTR3             0x30f
+#define MSR_P4_IQ_PERFCTR4             0x310
+#define MSR_P4_IQ_PERFCTR5             0x311
+#define MSR_P4_BPU_CCCR0               0x360
+#define MSR_P4_BPU_CCCR1               0x361
+#define MSR_P4_BPU_CCCR2               0x362
+#define MSR_P4_BPU_CCCR3               0x363
+#define MSR_P4_MS_CCCR0                0x364
+#define MSR_P4_MS_CCCR1                0x365
+#define MSR_P4_MS_CCCR2                0x366
+#define MSR_P4_MS_CCCR3                0x367
+#define MSR_P4_FLAME_CCCR0             0x368
+#define MSR_P4_FLAME_CCCR1             0x369
+#define MSR_P4_FLAME_CCCR2             0x36a
+#define MSR_P4_FLAME_CCCR3             0x36b
+#define MSR_P4_IQ_CCCR0                0x36c
+#define MSR_P4_IQ_CCCR1                0x36d
+#define MSR_P4_IQ_CCCR2                0x36e
+#define MSR_P4_IQ_CCCR3                0x36f
+#define MSR_P4_IQ_CCCR4                0x370
+#define MSR_P4_IQ_CCCR5                0x371
+#define MSR_P4_ALF_ESCR0               0x3ca
+#define MSR_P4_ALF_ESCR1               0x3cb
+#define MSR_P4_BPU_ESCR0               0x3b2
+#define MSR_P4_BPU_ESCR1               0x3b3
+#define MSR_P4_BSU_ESCR0               0x3a0
+#define MSR_P4_BSU_ESCR1               0x3a1
+#define MSR_P4_CRU_ESCR0               0x3b8
+#define MSR_P4_CRU_ESCR1               0x3b9
+#define MSR_P4_CRU_ESCR2               0x3cc
+#define MSR_P4_CRU_ESCR3               0x3cd
+#define MSR_P4_CRU_ESCR4               0x3e0
+#define MSR_P4_CRU_ESCR5               0x3e1
+#define MSR_P4_DAC_ESCR0               0x3a8
+#define MSR_P4_DAC_ESCR1               0x3a9
+#define MSR_P4_FIRM_ESCR0              0x3a4
+#define MSR_P4_FIRM_ESCR1              0x3a5
+#define MSR_P4_FLAME_ESCR0             0x3a6
+#define MSR_P4_FLAME_ESCR1             0x3a7
+#define MSR_P4_FSB_ESCR0               0x3a2
+#define MSR_P4_FSB_ESCR1               0x3a3
+#define MSR_P4_IQ_ESCR0                0x3ba
+#define MSR_P4_IQ_ESCR1                0x3bb
+#define MSR_P4_IS_ESCR0                0x3b4
+#define MSR_P4_IS_ESCR1                0x3b5
+#define MSR_P4_ITLB_ESCR0              0x3b6
+#define MSR_P4_ITLB_ESCR1              0x3b7
+#define MSR_P4_IX_ESCR0                0x3c8
+#define MSR_P4_IX_ESCR1                0x3c9
+#define MSR_P4_MOB_ESCR0               0x3aa
+#define MSR_P4_MOB_ESCR1               0x3ab
+#define MSR_P4_MS_ESCR0                0x3c0
+#define MSR_P4_MS_ESCR1                0x3c1
+#define MSR_P4_PMH_ESCR0               0x3ac
+#define MSR_P4_PMH_ESCR1               0x3ad
+#define MSR_P4_RAT_ESCR0               0x3bc
+#define MSR_P4_RAT_ESCR1               0x3bd
+#define MSR_P4_SAAT_ESCR0              0x3ae
+#define MSR_P4_SAAT_ESCR1              0x3af
+#define MSR_P4_SSU_ESCR0               0x3be
+#define MSR_P4_SSU_ESCR1               0x3bf    /* guess: not defined in manual */
+#define MSR_P4_TBPU_ESCR0              0x3c2
+#define MSR_P4_TBPU_ESCR1              0x3c3
+#define MSR_P4_TC_ESCR0                0x3c4
+#define MSR_P4_TC_ESCR1                0x3c5
+#define MSR_P4_U2L_ESCR0               0x3b0
+#define MSR_P4_U2L_ESCR1               0x3b1
+
+/* AMD Defined MSRs */
+#define MSR_K6_EFER                    0xC0000080
+#define MSR_K6_STAR                    0xC0000081
+#define MSR_K6_WHCR                    0xC0000082
+#define MSR_K6_UWCCR                   0xC0000085
+#define MSR_K6_EPMR                    0xC0000086
+#define MSR_K6_PSOR                    0xC0000087
+#define MSR_K6_PFIR                    0xC0000088
+
+#define MSR_K7_EVNTSEL0                        0xC0010000
+#define MSR_K7_EVNTSEL1                        0xC0010001
+#define MSR_K7_EVNTSEL2                        0xC0010002
+#define MSR_K7_EVNTSEL3                        0xC0010003
+#define MSR_K7_PERFCTR0                        0xC0010004
+#define MSR_K7_PERFCTR1                        0xC0010005
+#define MSR_K7_PERFCTR2                        0xC0010006
+#define MSR_K7_PERFCTR3                        0xC0010007
+#define MSR_K7_HWCR                    0xC0010015
+#define MSR_K7_CLK_CTL                 0xC001001b
+#define MSR_K7_FID_VID_CTL             0xC0010041
+#define MSR_K7_FID_VID_STATUS          0xC0010042
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1                   0x107
+#define MSR_IDT_FCR2                   0x108
+#define MSR_IDT_FCR3                   0x109
+#define MSR_IDT_FCR4                   0x10a
+
+#define MSR_IDT_MCR0                   0x110
+#define MSR_IDT_MCR1                   0x111
+#define MSR_IDT_MCR2                   0x112
+#define MSR_IDT_MCR3                   0x113
+#define MSR_IDT_MCR4                   0x114
+#define MSR_IDT_MCR5                   0x115
+#define MSR_IDT_MCR6                   0x116
+#define MSR_IDT_MCR7                   0x117
+#define MSR_IDT_MCR_CTRL               0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR                    0x1107
+#define MSR_VIA_LONGHAUL               0x110a
+#define MSR_VIA_RNG                    0x110b
+#define MSR_VIA_BCR2                   0x1147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL          0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS         0x80868011
+#define MSR_TMTA_LRTI_READOUT          0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ         0x8086801a
+
+#endif /* __ASM_MSR_H */
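Callers keep the native macro interface, but every access above is routed
through a DOM0_MSR dom0_op hypercall aimed at the current CPU, because a
paravirtualized guest cannot execute the privileged rdmsr/wrmsr instructions
itself. A usage sketch (illustrative; the hypercall only does anything for a
suitably privileged domain):

	static unsigned long long example_read_sysenter_cs(void)
	{
		unsigned long long val;

		rdmsrl(MSR_IA32_SYSENTER_CS, val);	/* hypercall, not a rdmsr insn */
		return val;
	}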
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/page.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/page.h
new file mode 100644 (file)
index 0000000..17aceff
--- /dev/null
@@ -0,0 +1,199 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT     12
+#define PAGE_SIZE      (1UL << PAGE_SHIFT)
+#define PAGE_MASK      (~(PAGE_SIZE-1))
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <asm/hypervisor-ifs/hypervisor-if.h>
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+#define clear_page(page)       mmx_clear_page((void *)(page))
+#define copy_page(to,from)     mmx_copy_page(to,from)
+
+#else
+
+/*
+ *     On older X86 processors it's not a win to use MMX here it seems.
+ *     Maybe the K6-III ?
+ */
+#define clear_page(page)       memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from)     memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#endif
+
+#define clear_user_page(page, vaddr, pg)       clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+extern unsigned long *phys_to_machine_mapping;
+#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
+#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+static inline unsigned long phys_to_machine(unsigned long phys)
+{
+    unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+    machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+    return machine;
+}
+static inline unsigned long machine_to_phys(unsigned long machine)
+{
+    unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+    phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+    return phys;
+}
+
+/*
+ * These are used to make use of C type-checking..
+ */
+#ifdef CONFIG_X86_PAE
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long long pgd; } pgd_t;
+#define pte_val(x)     ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define HPAGE_SHIFT    21
+#else
+typedef struct { unsigned long pte_low; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define boot_pte_t pte_t /* or would you rather have a typedef */
+#if 0                          /* XXXcl for MMU_UPDATE_DEBUG */
+static inline unsigned long pte_val(pte_t x)
+{
+       unsigned long ret = x.pte_low;
+       if ( (ret & 1) ) ret = machine_to_phys(ret);
+       return ret;
+}
+#else
+#define pte_val(x)     (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : (x).pte_low)
+#endif
+#define pte_val_ma(x)  ((x).pte_low)
+#define HPAGE_SHIFT    22
+#endif
+#define PTE_MASK       PAGE_MASK
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE     ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK     (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
+#endif
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+static inline unsigned long pmd_val(pmd_t x)
+{
+    unsigned long ret = x.pmd;
+    if ( (ret & 1) ) ret = machine_to_phys(ret);
+    return ret;
+}
+#define pgd_val(x)     ({ BUG(); (unsigned long)0; })
+#define pgprot_val(x)  ((x).pgprot)
+
+static inline pte_t __pte(unsigned long x)
+{
+       if ( (x & 1) ) x = phys_to_machine(x);
+       return ((pte_t) { (x) });
+}
+#define __pte_ma(x) ((pte_t) { (x) } )
+static inline pmd_t __pmd(unsigned long x)
+{
+       if ( (x & 1) ) x = phys_to_machine(x);
+       return ((pmd_t) { (x) });
+}
+#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
+#define __pgprot(x)    ((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)       (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB. 
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+#define __VMALLOC_RESERVE      (128 << 20)
+
+#ifndef __ASSEMBLY__
+
+/* Pure 2^n version of get_order */
+static __inline__ int get_order(unsigned long size)
+{
+       int order;
+
+       size = (size-1) >> (PAGE_SHIFT-1);
+       order = -1;
+       do {
+               size >>= 1;
+               order++;
+       } while (size);
+       return order;
+}
+
+#endif /* __ASSEMBLY__ */
+
+/* 
+ * XXXcl two options for PAGE_OFFSET
+ * - 0xC0000000:
+ *   change text offset in arch/xen/i386/kernel/vmlinux.lds.S
+ *   change __pa/__va macros
+ * - 0xC0100000:
+ *   change TASK_SIZE 
+ */
+#ifdef __ASSEMBLY__
+#define __PAGE_OFFSET          (0xC0100000)
+#else
+#define __PAGE_OFFSET          (0xC0100000UL)
+#endif
+
+
+#define PAGE_OFFSET            ((unsigned long)__PAGE_OFFSET)
+#define VMALLOC_RESERVE                ((unsigned long)__VMALLOC_RESERVE)
+#define MAXMEM                 (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+#define __pa(x)                        ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_to_page(pfn)       (mem_map + (pfn))
+#define page_to_pfn(page)      ((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn)         ((pfn) < max_mapnr)
+#endif /* !CONFIG_DISCONTIGMEM */
+#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
+                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(_a)    (phys_to_machine(__pa(_a)))
+#define machine_to_virt(_m)    (__va(machine_to_phys(_m)))
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
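A round-trip sketch for the pseudo-physical <-> machine translation above
(illustrative; the example_ name is invented). Only the frame number is
remapped; the offset within the page passes through unchanged:

	static int example_translation_roundtrip(void *va)
	{
		unsigned long phys = __pa(va);			/* pseudo-physical */
		unsigned long mach = phys_to_machine(phys);	/* machine address */

		return machine_to_phys(mach) == phys &&	/* tables are inverses */
		       virt_to_machine(va) == mach;
	}

In the same vein, get_order() above rounds a byte count up to a power-of-two
page order: get_order(3 * PAGE_SIZE) evaluates to 2, since three pages need
an order-2 (four-page) allocation.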
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/param.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/param.h
new file mode 100644 (file)
index 0000000..8ac120c
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef _ASMi386_PARAM_H
+#define _ASMi386_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ            100             /* Internal kernel timer frequency */
+# define USER_HZ       100             /* .. some user interfaces are in "ticks" */
+# define CLOCKS_PER_SEC                (USER_HZ)       /* like times() */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE  4096
+
+#ifndef NOGROUP
+#define NOGROUP                (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64      /* max length of hostname */
+
+#endif
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgalloc.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
new file mode 100644 (file)
index 0000000..422cdc5
--- /dev/null
@@ -0,0 +1,59 @@
+#ifndef _I386_PGALLOC_H
+#define _I386_PGALLOC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+#include <linux/mm.h>          /* for struct page */
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+               set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+{
+       set_pmd(pmd, __pmd(_PAGE_TABLE +
+               ((unsigned long long)page_to_pfn(pte) <<
+                       (unsigned long long) PAGE_SHIFT)));
+       flush_page_update_queue(); /* XXXcl flush */
+}
+/*
+ * Allocate and free page tables.
+ */
+
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
+
+static inline void pte_free_kernel(pte_t *pte)
+{
+       free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct page *pte)
+{
+       __free_page(pte);
+}
+
+
+#define __pte_free_tlb(tlb,pte) do {                   \
+       tlb_remove_page((tlb),(pte));                   \
+       flush_page_update_queue(); /* XXXcl flush */    \
+} while (0)
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ * (In the PAE case we free the pmds as part of the pgd.)
+ */
+
+#define pmd_alloc_one(mm, addr)                ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)                    do { } while (0)
+#define __pmd_free_tlb(tlb,x)          do { } while (0)
+#define pgd_populate(mm, pmd, pte)     BUG()
+
+#define check_pgt_cache()      do { } while (0)
+
+#endif /* _I386_PGALLOC_H */
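The XXXcl flushes exist because page-table writes are queued rather than
applied immediately: several queue_*_entry_update() calls can travel in one
hypercall when the queue is flushed. The underlying pattern, as a sketch
(illustrative; the example_ name is invented):

	static void example_batched_pte_update(pte_t *ptep, unsigned long new_low)
	{
		queue_l1_entry_update(ptep, new_low);	/* buffered, not yet live */
		flush_page_update_queue();	/* one hypercall applies the batch */
	}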
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
new file mode 100644 (file)
index 0000000..65a53ef
--- /dev/null
@@ -0,0 +1,98 @@
+#ifndef _I386_PGTABLE_2LEVEL_H
+#define _I386_PGTABLE_2LEVEL_H
+
+/*
+ * traditional i386 two-level paging structure:
+ */
+
+#define PGDIR_SHIFT    22
+#define PTRS_PER_PGD   1024
+#define PTRS_PER_PGD_NO_HV     (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
+
+/*
+ * the i386 is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+#define PMD_SHIFT      22
+#define PTRS_PER_PMD   1
+
+#define PTRS_PER_PTE   1024
+
+#define pte_ERROR(e) \
+       printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+#define pmd_ERROR(e) \
+       printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+       printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)          { return 0; }
+static inline int pgd_bad(pgd_t pgd)           { return 0; }
+static inline int pgd_present(pgd_t pgd)       { return 1; }
+#define pgd_clear(xp)                          do { } while (0)
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
+#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) queue_l2_entry_update((pmdptr), (pmdval).pmd)
+#define set_pgd(pgdptr, pgdval) ((void)0)
+
+#define pgd_page(pgd) \
+((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
+
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+{
+       return (pmd_t *) dir;
+}
+
+/*
+ * A note on implementation of this atomic 'get-and-clear' operation.
+ * This is actually very simple because Xen Linux can only run on a single
+ * processor. Therefore, we cannot race other processors setting the 'accessed'
+ * or 'dirty' bits on a page-table entry.
+ * Even if pages are shared between domains, that is not a problem because
+ * each domain will have separate page tables, with their own versions of
+ * accessed & dirty state.
+ */
+static inline pte_t ptep_get_and_clear(pte_t *xp)
+{
+       pte_t pte = *xp;
+       if (pte.pte_low)
+               queue_l1_entry_update(xp, 0);
+       return pte;
+}
+
+#define pte_same(a, b)         ((a).pte_low == (b).pte_low)
+#define pte_page(x)            pfn_to_page(pte_pfn(x))
+#define pte_none(x)            (!(x).pte_low)
+/* XXXcl check valid because msync.c:filemap_sync_pte calls without pte_present check */
+#define pte_pfn(x)             ((unsigned long)((((x).pte_low & 1 ? machine_to_phys((x).pte_low) : (x).pte_low) >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte_ma(pfn, prot)  __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+/*
+ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
+ * into this range:
+ */
+#define PTE_FILE_MAX_BITS      29
+
+#define pte_to_pgoff(pte) \
+       ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+
+#define pgoff_to_pte(off) \
+       ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+
+#endif /* _I386_PGTABLE_2LEVEL_H */
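The file-pte encoding keeps bits 0, 6 and 7 free for _PAGE_PRESENT,
_PAGE_FILE and _PAGE_PROTNONE, packing the 29-bit offset as 5 bits at
positions 1-5 plus the remainder at positions 8 and up. A round-trip sketch
(illustrative; the example_ name is invented):

	static int example_pgoff_roundtrip(unsigned long off)
	{
		unsigned long masked = off & ((1UL << PTE_FILE_MAX_BITS) - 1);
		pte_t pte = pgoff_to_pte(masked); /* sets _PAGE_FILE, not present */

		return pte_to_pgoff(pte) == masked;	/* holds for any 29-bit offset */
	}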
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h
new file mode 100644 (file)
index 0000000..e9e9410
--- /dev/null
@@ -0,0 +1,457 @@
+#ifndef _I386_PGTABLE_H
+#define _I386_PGTABLE_H
+
+#include <linux/config.h>
+#include <asm/hypervisor.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+
+#ifndef _I386_BITOPS_H
+#include <asm/bitops.h>
+#endif
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+extern unsigned long empty_zero_page[1024];
+extern pgd_t swapper_pg_dir[1024];
+extern kmem_cache_t *pgd_cache;
+extern kmem_cache_t *pmd_cache;
+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+
+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pgtable_cache_init(void);
+void paging_init(void);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level.h>
+#else
+# include <asm/pgtable-2level.h>
+#endif
+#endif
+
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD      (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR      0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT   22
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
+
+#ifndef __ASSEMBLY__
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+extern void * high_memory;
+#define VMALLOC_START  (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
+                                               ~(VMALLOC_OFFSET-1))
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END   (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END   (FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+/*
+ * The 4MB page bit is guesswork. It is detailed in the infamous "Chapter H"
+ * of the Pentium details, but assuming intel did the straightforward
+ * thing, this bit set in the page directory entry just means that
+ * the page directory entry points directly to a 4MB-aligned block of
+ * memory. 
+ */
+#define _PAGE_BIT_PRESENT      0
+#define _PAGE_BIT_RW           1
+#define _PAGE_BIT_USER         2
+#define _PAGE_BIT_PWT          3
+#define _PAGE_BIT_PCD          4
+#define _PAGE_BIT_ACCESSED     5
+#define _PAGE_BIT_DIRTY                6
+#define _PAGE_BIT_PSE          7       /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1      9       /* available for programmer */
+#define _PAGE_BIT_UNUSED2      10
+#define _PAGE_BIT_UNUSED3      11
+
+#define _PAGE_PRESENT  0x001
+#define _PAGE_RW       0x002
+#define _PAGE_USER     0x004
+#define _PAGE_PWT      0x008
+#define _PAGE_PCD      0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY    0x040
+#define _PAGE_PSE      0x080   /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_GLOBAL   0x100   /* Global TLB entry PPro+ */
+#define _PAGE_UNUSED1  0x200   /* available for programmer */
+#define _PAGE_UNUSED2  0x400
+#define _PAGE_UNUSED3  0x800
+
+#define _PAGE_FILE     0x040   /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x080   /* If not present */
+
+#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
+#define _PAGE_KERNEL \
+       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+extern unsigned long __PAGE_KERNEL;
+#define __PAGE_KERNEL_RO       (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_NOCACHE  (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_LARGE    (__PAGE_KERNEL | _PAGE_PSE)
+
+#define PAGE_KERNEL            __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO         __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE    __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE)
+
+/*
+ * The i386 can't do page protection for execute: it treats execute
+ * permission the same as read. Also, write permissions imply read
+ * permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+/*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without a 'verify_area(VERIFY_WRITE,..)'
+ */
+#undef TEST_VERIFY_AREA
+
+/* The boot page tables (all created as a single array) */
+extern unsigned long pg0[];
+
+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp)  do { set_pte(xp, __pte(0)); } while (0)
+
+#define pmd_none(x)    (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+/* pmd_clear below */
+#define        pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_user(pte_t pte)          { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_read(pte_t pte)          { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_exec(pte_t pte)          { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte)         { return (pte).pte_low & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)         { return (pte).pte_low & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte)         { return (pte).pte_low & _PAGE_RW; }
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+static inline int pte_file(pte_t pte)          { return (pte).pte_low & _PAGE_FILE; }
+
+static inline pte_t pte_rdprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_exprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_mkclean(pte_t pte)     { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte)       { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)   { (pte).pte_low &= ~_PAGE_RW; return pte; }
+static inline pte_t pte_mkread(pte_t pte)      { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte)      { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)     { (pte).pte_low |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)     { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)     { (pte).pte_low |= _PAGE_RW; return pte; }
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+       unsigned long pteval = *(unsigned long *)ptep;
+       int ret = pteval & _PAGE_DIRTY;
+       if (ret)
+               queue_l1_entry_update(ptep, pteval & ~_PAGE_DIRTY);
+       return ret;
+}
+static inline  int ptep_test_and_clear_young(pte_t *ptep)
+{
+       unsigned long pteval = *(unsigned long *)ptep;
+       int ret = pteval & _PAGE_ACCESSED;
+       if (ret)
+               queue_l1_entry_update(ptep, pteval & ~_PAGE_ACCESSED);
+       return ret;
+}
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+       unsigned long pteval = *(unsigned long *)ptep;
+       if ((pteval & _PAGE_RW))
+               queue_l1_entry_update(ptep, pteval & ~_PAGE_RW);
+}
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+       unsigned long pteval = *(unsigned long *)ptep;
+       if (!(pteval & _PAGE_DIRTY))
+               queue_l1_entry_update(ptep, pteval | _PAGE_DIRTY);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".  On processors which do not support
+ * it, this is a no-op.
+ */
+#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3)                                          \
+                                ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       pte.pte_low &= _PAGE_CHG_MASK;
+       pte.pte_low |= pgprot_val(newprot);
+       return pte;
+}
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pmd_page_kernel(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+#define pmd_clear(xp)  do {                                    \
+       pmd_t p = *(xp);                                        \
+       set_pmd(xp, __pmd(0));                                  \
+       __make_page_writeable((void *)pmd_page_kernel(p));      \
+} while (0)
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#define pmd_large(pmd) \
+       ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
+/*
+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+#define pmd_index(address) \
+               (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) \
+               (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+       ((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+       ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
+#define pte_offset_map_nested(dir, address) \
+       ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#else
+#define pte_offset_map(dir, address) \
+       ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#endif
+
+/*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Also, we only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+       do {                                                              \
+               if (__dirty) {                                            \
+                       queue_l1_entry_update((__ptep), (__entry).pte_low); \
+                       flush_tlb_page(__vma, __address);                 \
+               }                                                         \
+       } while (0)
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x)                  (((x).val >> 1) & 0x1f)
+#define __swp_offset(x)                        ((x).val >> 8)
+#define __swp_entry(type, offset)      ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __pte_to_swp_entry(pte)                ((swp_entry_t) { (pte).pte_low })
+#define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
+
+static inline void __make_page_readonly(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+}
+
+static inline void __make_page_writeable(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+}
+
+static inline void make_page_readonly(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+       if ( (unsigned long)va >= VMALLOC_START )
+               __make_page_readonly(machine_to_virt(
+                       *(unsigned long *)pte&PAGE_MASK));
+}
+
+static inline void make_page_writeable(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+       if ( (unsigned long)va >= VMALLOC_START )
+               __make_page_writeable(machine_to_virt(
+                       *(unsigned long *)pte&PAGE_MASK));
+}
+
+static inline void make_pages_readonly(void *va, unsigned int nr)
+{
+       while ( nr-- != 0 )
+       {
+               make_page_readonly(va);
+               va = (void *)((unsigned long)va + PAGE_SIZE);
+       }
+}
+
+static inline void make_pages_writeable(void *va, unsigned int nr)
+{
+       while ( nr-- != 0 )
+       {
+               make_page_writeable(va);
+               va = (void *)((unsigned long)va + PAGE_SIZE);
+       }
+}
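+
+/*
+ * Illustrative usage (a sketch, not part of the original header): Xen
+ * requires that any page installed as a page table be mapped read-only
+ * in every linear mapping, so a freshly allocated pte page is typically
+ * write-protected before being hooked into a pmd, and made writeable
+ * again before it is freed:
+ *
+ *     void *pt = (void *)__get_free_page(GFP_KERNEL);
+ *     clear_page(pt);
+ *     make_page_readonly(pt);
+ *     ... install and use as a page table ...
+ *     make_page_writeable(pt);
+ *     free_page((unsigned long)pt);
+ */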
+
+static inline unsigned long arbitrary_virt_to_phys(void *va)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long)va);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
+       pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
+       unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
+       return pa | ((unsigned long)va & (PAGE_SIZE-1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_DISCONTIGMEM
+#define kern_addr_valid(addr)  (1)
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#define io_remap_page_range remap_page_range
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _I386_PGTABLE_H */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/processor.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/processor.h
new file mode 100644 (file)
index 0000000..6502baa
--- /dev/null
@@ -0,0 +1,667 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+#include <asm/system.h>
+#include <linux/cache.h>
+#include <linux/config.h>
+#include <linux/threads.h>
+
+/* flag for disabling the tsc */
+extern int tsc_disable;
+
+struct desc_struct {
+       unsigned long a,b;
+};
+
+#define desc_empty(desc) \
+               (!((desc)->a + (desc)->b))
+
+#define desc_equal(desc1, desc2) \
+               (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+
+/*
+ *  CPU type and hardware bug flags. Kept separately for each CPU.
+ *  Members of this structure are referenced in head.S, so think twice
+ *  before touching them. [mj]
+ */
+
+struct cpuinfo_x86 {
+       __u8    x86;            /* CPU family */
+       __u8    x86_vendor;     /* CPU vendor */
+       __u8    x86_model;
+       __u8    x86_mask;
+       char    wp_works_ok;    /* It doesn't on 386's */
+       char    hlt_works_ok;   /* Problems on some 486Dx4's and old 386's */
+       char    hard_math;
+       char    rfu;
+       int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
+       unsigned long   x86_capability[NCAPINTS];
+       char    x86_vendor_id[16];
+       char    x86_model_id[64];
+       int     x86_cache_size;  /* in KB - valid for CPUS which support this
+                                   call  */
+       int     x86_cache_alignment;    /* In bytes */
+       int     fdiv_bug;
+       int     f00f_bug;
+       int     coma_bug;
+       unsigned long loops_per_jiffy;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_NEXGEN 4
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_RISE 6
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_NUM 9
+#define X86_VENDOR_UNKNOWN 0xff
+
+/*
+ * capabilities of CPUs
+ */
+
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct cpuinfo_x86 new_cpu_data;
+extern struct tss_struct init_tss[NR_CPUS];
+extern struct tss_struct doublefault_tss;
+extern pgd_t *cur_pgd;         /* XXXsmp */
+
+#ifdef CONFIG_SMP
+extern struct cpuinfo_x86 cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+#endif
+
+extern char ignore_fpu_irq;
+
+extern void identify_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void dodgy_tsc(void);
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF  0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF  0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF  0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF  0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF  0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF  0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF  0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF  0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF  0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL        0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT  0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF  0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM  0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC  0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID  0x00200000 /* CPUID detection flag */
+
+/*
+ * Generic CPUID function
+ */
+static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+       __asm__("cpuid"
+               : "=a" (*eax),
+                 "=b" (*ebx),
+                 "=c" (*ecx),
+                 "=d" (*edx)
+               : "0" (op));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+       unsigned int eax;
+
+       __asm__("cpuid"
+               : "=a" (eax)
+               : "0" (op)
+               : "bx", "cx", "dx");
+       return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+       unsigned int eax, ebx;
+
+       __asm__("cpuid"
+               : "=a" (eax), "=b" (ebx)
+               : "0" (op)
+               : "cx", "dx" );
+       return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+       unsigned int eax, ecx;
+
+       __asm__("cpuid"
+               : "=a" (eax), "=c" (ecx)
+               : "0" (op)
+               : "bx", "dx" );
+       return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+       unsigned int eax, edx;
+
+       __asm__("cpuid"
+               : "=a" (eax), "=d" (edx)
+               : "0" (op)
+               : "bx", "cx");
+       return edx;
+}
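+
+/*
+ * Illustrative usage (a sketch, not part of the original header):
+ * fetching the 12-byte vendor identification string with CPUID leaf 0.
+ * The string is laid out in ebx, edx, ecx order:
+ *
+ *     int eax, vendor[3];
+ *     cpuid(0, &eax, &vendor[0], &vendor[2], &vendor[1]);
+ *     // vendor now spells e.g. "GenuineIntel"
+ */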
+
+#define load_cr3(pgdir) do {                           \
+       queue_pt_switch(__pa(pgdir));                   \
+       flush_page_update_queue();                      \
+       cur_pgd = pgdir;        /* XXXsmp */            \
+} while (/* CONSTCOND */0)
+
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME            0x0001  /* enable vm86 extensions */
+#define X86_CR4_PVI            0x0002  /* virtual interrupts flag enable */
+#define X86_CR4_TSD            0x0004  /* disable time stamp at ipl 3 */
+#define X86_CR4_DE             0x0008  /* enable debugging extensions */
+#define X86_CR4_PSE            0x0010  /* enable page size extensions */
+#define X86_CR4_PAE            0x0020  /* enable physical address extensions */
+#define X86_CR4_MCE            0x0040  /* Machine check enable */
+#define X86_CR4_PGE            0x0080  /* enable global pages */
+#define X86_CR4_PCE            0x0100  /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR         0x0200  /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT     0x0400  /* enable unmasked SSE exceptions */
+
+/*
+ * Save the cr4 feature set we're using (i.e.
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPUs that boot up
+ * after us can get the correct flags.
+ */
+extern unsigned long mmu_cr4_features;
+
+static inline void set_in_cr4 (unsigned long mask)
+{
+       mmu_cr4_features |= mask;
+       switch (mask) {
+       case X86_CR4_OSFXSR:
+       case X86_CR4_OSXMMEXCPT:
+               break;
+       default:
+               printk("Xen unsupported cr4 update\n");
+               BUG();
+       }
+}
+
+static inline void clear_in_cr4 (unsigned long mask)
+{
+       mmu_cr4_features &= ~mask;
+       __asm__("movl %%cr4,%%eax\n\t"
+               "andl %0,%%eax\n\t"
+               "movl %%eax,%%cr4\n"
+               : : "irg" (~mask)
+               :"ax");
+}
+
+/*
+ *      NSC/Cyrix CPU configuration register indexes
+ */
+
+#define CX86_PCR0 0x20
+#define CX86_GCR  0xb8
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_PCR1 0xf0
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+/*
+ *      NSC/Cyrix CPU indexed register access macros
+ */
+
+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86(reg, data) do { \
+       outb((reg), 0x22); \
+       outb((data), 0x23); \
+} while (0)
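+
+/*
+ * Illustrative usage (a sketch, not part of the original header):
+ * a read-modify-write of an indexed configuration register, here
+ * setting the MAPEN field of CCR3 and later restoring it:
+ *
+ *     unsigned char ccr3 = getCx86(CX86_CCR3);
+ *     setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);   (enable MAPEN)
+ *     ...
+ *     setCx86(CX86_CCR3, ccr3);                   (restore)
+ */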
+
+/*
+ * Bus types (default is ISA, but people can check others with these..)
+ * pc98 indicates PC98 systems (CBUS)
+ */
+extern int MCA_bus;
+#ifdef CONFIG_X86_PC9800
+#define pc98 1
+#else
+#define pc98 0
+#endif
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+               unsigned long edx)
+{
+       /* "monitor %eax,%ecx,%edx;" */
+       asm volatile(
+               ".byte 0x0f,0x01,0xc8;"
+               : :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+       /* "mwait %eax,%ecx;" */
+       asm volatile(
+               ".byte 0x0f,0x01,0xc9;"
+               : :"a" (eax), "c" (ecx));
+}
+
+/* From the system description table in the BIOS.  Mostly for MCA use,
+   but others may find it useful. */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+extern unsigned int mca_pentium_flag;
+
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE      (PAGE_OFFSET & PGDIR_MASK)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 3))
+
+/*
+ * Size of io_bitmap, covering ports 0 to 0x3ff.
+ */
+#define IO_BITMAP_BITS  1024
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+
+struct i387_fsave_struct {
+       long    cwd;
+       long    swd;
+       long    twd;
+       long    fip;
+       long    fcs;
+       long    foo;
+       long    fos;
+       long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
+       long    status;         /* software status information */
+};
+
+struct i387_fxsave_struct {
+       unsigned short  cwd;
+       unsigned short  swd;
+       unsigned short  twd;
+       unsigned short  fop;
+       long    fip;
+       long    fcs;
+       long    foo;
+       long    fos;
+       long    mxcsr;
+       long    mxcsr_mask;
+       long    st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
+       long    xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
+       long    padding[56];
+} __attribute__ ((aligned (16)));
+
+struct i387_soft_struct {
+       long    cwd;
+       long    swd;
+       long    twd;
+       long    fip;
+       long    fcs;
+       long    foo;
+       long    fos;
+       long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
+       unsigned char   ftop, changed, lookahead, no_update, rm, alimit;
+       struct info     *info;
+       unsigned long   entry_eip;
+};
+
+union i387_union {
+       struct i387_fsave_struct        fsave;
+       struct i387_fxsave_struct       fxsave;
+       struct i387_soft_struct soft;
+};
+
+typedef struct {
+       unsigned long seg;
+} mm_segment_t;
+
+struct tss_struct {
+       unsigned short  back_link,__blh;
+       unsigned long   esp0;
+       unsigned short  ss0,__ss0h;
+       unsigned long   esp1;
+       unsigned short  ss1,__ss1h;     /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
+       unsigned long   esp2;
+       unsigned short  ss2,__ss2h;
+       unsigned long   __cr3;
+       unsigned long   eip;
+       unsigned long   eflags;
+       unsigned long   eax,ecx,edx,ebx;
+       unsigned long   esp;
+       unsigned long   ebp;
+       unsigned long   esi;
+       unsigned long   edi;
+       unsigned short  es, __esh;
+       unsigned short  cs, __csh;
+       unsigned short  ss, __ssh;
+       unsigned short  ds, __dsh;
+       unsigned short  fs, __fsh;
+       unsigned short  gs, __gsh;
+       unsigned short  ldt, __ldth;
+       unsigned short  trace, io_bitmap_base;
+       /*
+        * The extra 1 is there because the CPU will access an
+        * additional byte beyond the end of the IO permission
+        * bitmap. The extra byte must be all 1 bits, and must
+        * be within the limit.
+        */
+       unsigned long   io_bitmap[IO_BITMAP_LONGS + 1];
+       /*
+        * pads the TSS to be cacheline-aligned (size is 0x100)
+        */
+       unsigned long __cacheline_filler[5];
+       /*
+        * .. and then another 0x100 bytes for emergency kernel stack
+        */
+       unsigned long stack[64];
+} __attribute__((packed));
+
+#define ARCH_MIN_TASKALIGN     16
+
+struct thread_struct {
+/* cached TLS descriptors. */
+       struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+       unsigned long   esp0;
+       unsigned long   sysenter_cs;
+       unsigned long   eip;
+       unsigned long   esp;
+       unsigned long   fs;
+       unsigned long   gs;
+/* Hardware debugging registers */
+       unsigned long   debugreg[8];  /* %%db0-7 debug registers */
+/* fault info */
+       unsigned long   cr2, trap_no, error_code;
+/* floating point info */
+       union i387_union        i387;
+/* virtual 86 mode info */
+       struct vm86_struct __user * vm86_info;
+       unsigned long           screen_bitmap;
+       unsigned long           v86flags, v86mask, saved_esp0;
+       unsigned int            saved_fs, saved_gs;
+/* IO permissions */
+       unsigned long   *io_bitmap_ptr;
+};
+
+#define INIT_THREAD  {                                                 \
+       .vm86_info = NULL,                                              \
+       .sysenter_cs = __KERNEL_CS,                                     \
+       .io_bitmap_ptr = NULL,                                          \
+}
+
+/*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+ * permission bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+#define INIT_TSS  {                                                    \
+       .esp0           = sizeof(init_stack) + (long)&init_stack,       \
+       .ss0            = __KERNEL_DS,                                  \
+       .esp1           = sizeof(init_tss[0]) + (long)&init_tss[0],     \
+       .ss1            = __KERNEL_CS,                                  \
+       .ldt            = GDT_ENTRY_LDT,                                \
+       .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,                     \
+       .io_bitmap      = { [ 0 ... IO_BITMAP_LONGS] = ~0 },            \
+}
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+       tss->esp0 = thread->esp0;
+       /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+       if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+               tss->ss1 = thread->sysenter_cs;
+               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+       }
+       HYPERVISOR_stack_switch(tss->ss0, tss->esp0);
+}
+
+#define start_thread(regs, new_eip, new_esp) do {              \
+       __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));       \
+       set_fs(USER_DS);                                        \
+       regs->xds = __USER_DS;                                  \
+       regs->xes = __USER_DS;                                  \
+       regs->xss = __USER_DS;                                  \
+       regs->xcs = __USER_CS;                                  \
+       regs->eip = new_eip;                                    \
+       regs->esp = new_esp;                                    \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+void show_trace(struct task_struct *task, unsigned long *stack);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
+#define KSTK_TOP(info)                                                 \
+({                                                                     \
+       unsigned long *__ptr = (unsigned long *)(info);                 \
+       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
+})
+
+#define task_pt_regs(task)                                             \
+({                                                                     \
+       struct pt_regs *__regs__;                                       \
+       __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);     \
+       __regs__ - 1;                                                   \
+})
+
+#define KSTK_EIP(task) (task_pt_regs(task)->eip)
+#define KSTK_ESP(task) (task_pt_regs(task)->esp)
+
+
+struct microcode_header {
+       unsigned int hdrver;
+       unsigned int rev;
+       unsigned int date;
+       unsigned int sig;
+       unsigned int cksum;
+       unsigned int ldrver;
+       unsigned int pf;
+       unsigned int datasize;
+       unsigned int totalsize;
+       unsigned int reserved[3];
+};
+
+struct microcode {
+       struct microcode_header hdr;
+       unsigned int bits[0];
+};
+
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+       unsigned int sig;
+       unsigned int pf;
+       unsigned int cksum;
+};
+
+struct extended_sigtable {
+       unsigned int count;
+       unsigned int cksum;
+       unsigned int reserved[3];
+       struct extended_signature sigs[0];
+};
+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+#define MICROCODE_IOCFREE      _IO('6',0)
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+       __asm__ __volatile__("rep;nop": : :"memory");
+}
+
+#define cpu_relax()    rep_nop()
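+
+/*
+ * Typical use (illustrative sketch; 'flags' and FLAG_READY are
+ * hypothetical names, not from this header): a polling loop that stays
+ * friendly to the sibling thread on HT CPUs and saves power on the P4.
+ *
+ *     while (!test_bit(FLAG_READY, &flags))
+ *             cpu_relax();
+ */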
+
+/* generic versions from gas */
+#define GENERIC_NOP1   ".byte 0x90\n"
+#define GENERIC_NOP2   ".byte 0x89,0xf6\n"
+#define GENERIC_NOP3   ".byte 0x8d,0x76,0x00\n"
+#define GENERIC_NOP4   ".byte 0x8d,0x74,0x26,0x00\n"
+#define GENERIC_NOP5   GENERIC_NOP1 GENERIC_NOP4
+#define GENERIC_NOP6   ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP7   ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
+#define GENERIC_NOP8   GENERIC_NOP1 GENERIC_NOP7
+
+/* Opteron nops */
+#define K8_NOP1 GENERIC_NOP1
+#define K8_NOP2        ".byte 0x66,0x90\n" 
+#define K8_NOP3        ".byte 0x66,0x66,0x90\n" 
+#define K8_NOP4        ".byte 0x66,0x66,0x66,0x90\n" 
+#define K8_NOP5        K8_NOP3 K8_NOP2 
+#define K8_NOP6        K8_NOP3 K8_NOP3
+#define K8_NOP7        K8_NOP4 K8_NOP3
+#define K8_NOP8        K8_NOP4 K8_NOP4
+
+/* K7 nops */
+/* uses eax dependencies (arbitrary choice) */
+#define K7_NOP1  GENERIC_NOP1
+#define K7_NOP2        ".byte 0x8b,0xc0\n" 
+#define K7_NOP3        ".byte 0x8d,0x04,0x20\n"
+#define K7_NOP4        ".byte 0x8d,0x44,0x20,0x00\n"
+#define K7_NOP5        K7_NOP4 ASM_NOP1
+#define K7_NOP6        ".byte 0x8d,0x80,0,0,0,0\n"
+#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
+#define K7_NOP8        K7_NOP7 ASM_NOP1
+
+#ifdef CONFIG_MK8
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
+#elif defined(CONFIG_MK7)
+#define ASM_NOP1 K7_NOP1
+#define ASM_NOP2 K7_NOP2
+#define ASM_NOP3 K7_NOP3
+#define ASM_NOP4 K7_NOP4
+#define ASM_NOP5 K7_NOP5
+#define ASM_NOP6 K7_NOP6
+#define ASM_NOP7 K7_NOP7
+#define ASM_NOP8 K7_NOP8
+#else
+#define ASM_NOP1 GENERIC_NOP1
+#define ASM_NOP2 GENERIC_NOP2
+#define ASM_NOP3 GENERIC_NOP3
+#define ASM_NOP4 GENERIC_NOP4
+#define ASM_NOP5 GENERIC_NOP5
+#define ASM_NOP6 GENERIC_NOP6
+#define ASM_NOP7 GENERIC_NOP7
+#define ASM_NOP8 GENERIC_NOP8
+#endif
+
+#define ASM_NOP_MAX 8
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth caring about 3dnow! prefetches for the K6
+   because they are microcoded there and very slow.
+   However, we currently don't do prefetches for pre-XP Athlons;
+   that should be fixed. */
+#define ARCH_HAS_PREFETCH
+extern inline void prefetch(const void *x)
+{
+       alternative_input(ASM_NOP4,
+                         "prefetchnta (%1)",
+                         X86_FEATURE_XMM,
+                         "r" (x));
+}
+
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for 
+   spinlocks to avoid one state transition in the cache coherency protocol. */
+extern inline void prefetchw(const void *x)
+{
+       alternative_input(ASM_NOP4,
+                         "prefetchw (%1)",
+                         X86_FEATURE_3DNOW,
+                         "r" (x));
+}
+#define spin_lock_prefetch(x)  prefetchw(x)
+
+extern void select_idle_routine(const struct cpuinfo_x86 *c);
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+#ifdef CONFIG_SCHED_SMT
+#define ARCH_HAS_SCHED_DOMAIN
+#define ARCH_HAS_SCHED_WAKE_IDLE
+#endif
+
+#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/ptrace.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/ptrace.h
new file mode 100644 (file)
index 0000000..0165ccc
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS  13
+#define EFL 14
+#define UESP 15
+#define SS   16
+#define FRAME_SIZE 17
+
+/* this struct defines the way the registers are stored on the 
+   stack during a system call. */
+
+struct pt_regs {
+       long ebx;
+       long ecx;
+       long edx;
+       long esi;
+       long edi;
+       long ebp;
+       long eax;
+       int  xds;
+       int  xes;
+       long orig_eax;
+       long eip;
+       int  xcs;
+       long eflags;
+       long esp;
+       int  xss;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS            12
+#define PTRACE_SETREGS            13
+#define PTRACE_GETFPREGS          14
+#define PTRACE_SETFPREGS          15
+#define PTRACE_GETFPXREGS         18
+#define PTRACE_SETFPXREGS         19
+
+#define PTRACE_OLDSETOPTIONS         21
+
+#define PTRACE_GET_THREAD_AREA    25
+#define PTRACE_SET_THREAD_AREA    26
+
+#ifdef __KERNEL__
+#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (2 & (regs)->xcs))
+#define instruction_pointer(regs) ((regs)->eip)
+#endif
+
+#endif
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/segment.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/segment.h
new file mode 100644 (file)
index 0000000..288243f
--- /dev/null
@@ -0,0 +1,96 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+/*
+ * The layout of the per-CPU GDT under Linux:
+ *
+ *   0 - null
+ *   1 - reserved
+ *   2 - reserved
+ *   3 - reserved
+ *
+ *   4 - unused                        <==== new cacheline
+ *   5 - unused
+ *
+ *  ------- start of TLS (Thread-Local Storage) segments:
+ *
+ *   6 - TLS segment #1                        [ glibc's TLS segment ]
+ *   7 - TLS segment #2                        [ Wine's %fs Win32 segment ]
+ *   8 - TLS segment #3
+ *   9 - reserved
+ *  10 - reserved
+ *  11 - reserved
+ *
+ *  ------- start of kernel segments:
+ *
+ *  12 - kernel code segment           <==== new cacheline
+ *  13 - kernel data segment
+ *  14 - default user CS
+ *  15 - default user DS
+ *  16 - TSS
+ *  17 - LDT
+ *  18 - PNPBIOS support (16->32 gate)
+ *  19 - PNPBIOS support
+ *  20 - PNPBIOS support
+ *  21 - PNPBIOS support
+ *  22 - PNPBIOS support
+ *  23 - APM BIOS support
+ *  24 - APM BIOS support
+ *  25 - APM BIOS support 
+ *
+ *  26 - unused
+ *  27 - unused
+ *  28 - unused
+ *  29 - unused
+ *  30 - unused
+ *  31 - TSS for double fault handler
+ */
+#define GDT_ENTRY_TLS_ENTRIES  3
+#define GDT_ENTRY_TLS_MIN      6
+#define GDT_ENTRY_TLS_MAX      (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#define GDT_ENTRY_DEFAULT_USER_CS      14
+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
+
+#define GDT_ENTRY_DEFAULT_USER_DS      15
+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
+
+#define GDT_ENTRY_KERNEL_BASE  12
+
+#define GDT_ENTRY_KERNEL_CS            (GDT_ENTRY_KERNEL_BASE + 0)
+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)
+
+#define GDT_ENTRY_KERNEL_DS            (GDT_ENTRY_KERNEL_BASE + 1)
+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8 + 1)
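+
+/* Editorial note: the kernel selectors above carry RPL 1 rather than
+   the native RPL 0, because a Xen guest kernel runs in ring 1. */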
+
+#define GDT_ENTRY_TSS                  (GDT_ENTRY_KERNEL_BASE + 4)
+#define GDT_ENTRY_LDT                  (GDT_ENTRY_KERNEL_BASE + 5)
+
+#define GDT_ENTRY_PNPBIOS_BASE         (GDT_ENTRY_KERNEL_BASE + 6)
+#define GDT_ENTRY_APMBIOS_BASE         (GDT_ENTRY_KERNEL_BASE + 11)
+
+#define GDT_ENTRY_DOUBLEFAULT_TSS      31
+
+/*
+ * The GDT has LAST_RESERVED_GDT_ENTRY + 1 entries
+ */
+#define GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY + 1)
+
+#define GDT_SIZE (GDT_ENTRIES * 8)
+
+/* Simple and small GDT entries for booting only */
+
+#define __BOOT_CS      FLAT_GUESTOS_CS
+
+#define __BOOT_DS      FLAT_GUESTOS_DS
+
+/*
+ * The interrupt descriptor table has room for 256 entries;
+ * the global descriptor table is dependent on the number
+ * of tasks we can have.
+ */
+#define IDT_ENTRIES 256
+
+#endif
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/setup.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/setup.h
new file mode 100644 (file)
index 0000000..3b92f29
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ *     Just a placeholder. We don't want to have to test x86 before
+ *     we include stuff.
+ */
+
+#ifndef _i386_SETUP_H
+#define _i386_SETUP_H
+
+#define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x)    ((x) << PAGE_SHIFT)
+
+/*
+ * Reserved space for vmalloc and iomap - defined in asm/page.h
+ */
+#define MAXMEM_PFN     PFN_DOWN(MAXMEM)
+#define MAX_NONPAE_PFN (1 << 20)
+
+#define PARAM_SIZE 2048
+#define COMMAND_LINE_SIZE 256
+
+#define OLD_CL_MAGIC_ADDR      0x90020
+#define OLD_CL_MAGIC           0xA33F
+#define OLD_CL_BASE_ADDR       0x90000
+#define OLD_CL_OFFSET          0x90022
+#define NEW_CL_POINTER         0x228   /* Relative to real mode data */
+
+#ifndef __ASSEMBLY__
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+extern unsigned char boot_params[PARAM_SIZE];
+
+#define PARAM  (boot_params)
+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
+#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
+#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
+#define INITRD_START (start_info.mod_start)
+#define INITRD_SIZE (start_info.mod_len)
+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
+#define DISK80_SIGNATURE (*(unsigned int*) (PARAM+DISK80_SIG_BUFFER))
+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _i386_SETUP_H */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
new file mode 100644 (file)
index 0000000..8093de0
--- /dev/null
@@ -0,0 +1,83 @@
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+#include <linux/config.h>
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ ( 
+        "lock btsl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btrl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btcl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btsl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btrl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+
+    __asm__ __volatile__ (
+        "lock btcl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+    return ((1UL << (nr & 31)) & 
+            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "btl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+    return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
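+
+/*
+ * Illustrative usage (a sketch; 'shared' and process_port() are
+ * hypothetical names): these strongly-ordered bitops are meant for
+ * memory shared with Xen, e.g. consuming a pending event-channel bit
+ * exactly once even while the hypervisor may be setting bits:
+ *
+ *     if ( synch_test_and_clear_bit(port, &shared->evtchn_pending[0]) )
+ *         process_port(port);
+ */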
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/system.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/system.h
new file mode 100644 (file)
index 0000000..d92ce5d
--- /dev/null
@@ -0,0 +1,525 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <asm/synch_bitops.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/hypervisor.h>
+#include <asm-xen/evtchn.h>
+
+#ifdef __KERNEL__
+
+struct task_struct;    /* one of the stranger aspects of C forward declarations.. */
+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+
+#define switch_to(prev,next,last) do {                                 \
+       unsigned long esi,edi;                                          \
+       asm volatile("pushfl\n\t"                                       \
+                    "pushl %%ebp\n\t"                                  \
+                    "movl %%esp,%0\n\t"        /* save ESP */          \
+                    "movl %5,%%esp\n\t"        /* restore ESP */       \
+                    "movl $1f,%1\n\t"          /* save EIP */          \
+                    "pushl %6\n\t"             /* restore EIP */       \
+                    "jmp __switch_to\n"                                \
+                    "1:\t"                                             \
+                    "popl %%ebp\n\t"                                   \
+                    "popfl"                                            \
+                    :"=m" (prev->thread.esp),"=m" (prev->thread.eip),  \
+                     "=a" (last),"=S" (esi),"=D" (edi)                 \
+                    :"m" (next->thread.esp),"m" (next->thread.eip),    \
+                     "2" (prev), "d" (next));                          \
+} while (0)
+
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+       "rorl $16,%%edx\n\t" \
+       "movb %%dl,%2\n\t" \
+       "movb %%dh,%3" \
+       :"=&d" (__pr) \
+       :"m" (*((addr)+2)), \
+        "m" (*((addr)+4)), \
+        "m" (*((addr)+7)), \
+         "0" (base) \
+        ); } while(0)
+
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+       "rorl $16,%%edx\n\t" \
+       "movb %2,%%dh\n\t" \
+       "andb $0xf0,%%dh\n\t" \
+       "orb %%dh,%%dl\n\t" \
+       "movb %%dl,%2" \
+       :"=&d" (__lr) \
+       :"m" (*(addr)), \
+        "m" (*((addr)+6)), \
+        "0" (limit) \
+        ); } while(0)
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+       unsigned long __base;
+       __asm__("movb %3,%%dh\n\t"
+               "movb %2,%%dl\n\t"
+               "shll $16,%%edx\n\t"
+               "movw %1,%%dx"
+               :"=&d" (__base)
+               :"m" (*((addr)+2)),
+                "m" (*((addr)+4)),
+                "m" (*((addr)+7)));
+       return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg,value)                 \
+       asm volatile("\n"                       \
+               "1:\t"                          \
+               "movl %0,%%" #seg "\n"          \
+               "2:\n"                          \
+               ".section .fixup,\"ax\"\n"      \
+               "3:\t"                          \
+               "pushl $0\n\t"                  \
+               "popl %%" #seg "\n\t"           \
+               "jmp 2b\n"                      \
+               ".previous\n"                   \
+               ".section __ex_table,\"a\"\n\t" \
+               ".align 4\n\t"                  \
+               ".long 1b,3b\n"                 \
+               ".previous"                     \
+               : :"m" (*(unsigned int *)&(value)))
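+
+/*
+ * Illustrative usage (a sketch, not part of the original header):
+ * context-switch code reloads the user segment registers this way,
+ * e.g. loadsegment(fs, next->fs); the fixup section above silently
+ * substitutes the null selector if the saved value has become invalid.
+ */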
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+       asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+/* NB. 'clts' is done for us by Xen during virtual trap. */
+#define clts() ((void)0)
+#define read_cr0() \
+       BUG();
+#define write_cr0(x) \
+       BUG();
+
+#define read_cr4() \
+       BUG();
+#define write_cr4(x) \
+       BUG();
+#define stts() (HYPERVISOR_fpu_taskswitch())
+
+#endif /* __KERNEL__ */
+
+#define wbinvd() \
+       BUG();
+//     __asm__ __volatile__ ("wbinvd": : :"memory");
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+       unsigned long __limit;
+       __asm__("lsll %1,%0"
+               :"=r" (__limit):"r" (segment));
+       return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+/*
+ * The semantics of CMPXCHG8B are a bit strange; this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+               unsigned int low, unsigned int high)
+{
+       __asm__ __volatile__ (
+               "\n1:\t"
+               "movl (%0), %%eax\n\t"
+               "movl 4(%0), %%edx\n\t"
+               "lock cmpxchg8b (%0)\n\t"
+               "jnz 1b"
+               : /* no outputs */
+               :       "D"(ptr),
+                       "b"(low),
+                       "c"(high)
+               :       "ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+                                                unsigned long long value)
+{
+       __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x)      *(((unsigned int*)&(x))+0)
+#define ll_high(x)     *(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+                        unsigned long long value)
+{
+       __set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ *       but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+       switch (size) {
+               case 1:
+                       __asm__ __volatile__("xchgb %b0,%1"
+                               :"=q" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 2:
+                       __asm__ __volatile__("xchgw %w0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 4:
+                       __asm__ __volatile__("xchgl %0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+       }
+       return x;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+#endif
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                     unsigned long new, int size)
+{
+       unsigned long prev;
+       switch (size) {
+       case 1:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 2:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       case 4:
+               __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+                                    : "=a"(prev)
+                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "memory");
+               return prev;
+       }
+       return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+       ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+                                       (unsigned long)(n),sizeof(*(ptr))))
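+
+/*
+ * Illustrative usage (a sketch; 'counter' is a hypothetical
+ * unsigned long *): the classic lock-free read-modify-write loop
+ * built on cmpxchg():
+ *
+ *     unsigned long old, new;
+ *     do {
+ *             old = *counter;
+ *             new = old + 1;
+ *     } while (cmpxchg(counter, old, new) != old);
+ */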
+    
+#ifdef __KERNEL__
+struct alt_instr { 
+       __u8 *instr;            /* original instruction */
+       __u8 *replacement;
+       __u8  cpuid;            /* cpuid bit set for replacement */
+       __u8  instrlen;         /* length of original instruction */
+       __u8  replacementlen;   /* length of new instruction, <= instrlen */ 
+       __u8  pad;
+}; 
+#endif
+
+/* 
+ * Alternative instructions for different CPU types or capabilities.
+ * 
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ * 
+ * The length of oldinstr must be greater than or equal to the length
+ * of newinstr; it can be padded with nops as needed.
+ * 
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature)       \
+       asm volatile ("661:\n\t" oldinstr "\n662:\n"                 \
+                     ".section .altinstructions,\"a\"\n"            \
+                     "  .align 4\n"                                   \
+                     "  .long 661b\n"            /* label */          \
+                     "  .long 663f\n"            /* new instruction */         \
+                     "  .byte %c0\n"             /* feature bit */    \
+                     "  .byte 662b-661b\n"       /* sourcelen */      \
+                     "  .byte 664f-663f\n"       /* replacementlen */ \
+                     ".previous\n"                                             \
+                     ".section .altinstr_replacement,\"ax\"\n"                 \
+                     "663:\n\t" newinstr "\n664:\n"   /* replacement */    \
+                     ".previous" :: "i" (feature) : "memory")  
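+
+/*
+ * Illustrative usage (taken from mb()/rmb() later in this file): a
+ * locked add is replaced by mfence/lfence when the CPU advertises
+ * SSE2, so generic kernels still get the cheaper instruction at
+ * runtime:
+ *
+ *     alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+ */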
+
+/*
+ * Alternative inline assembly with input.
+ * 
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like (%1) ... "r").
+ * If you use variable-sized constraints like "m" or "g" in the
+ * replacement, make sure to pad to the worst-case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input)                  \
+       asm volatile ("661:\n\t" oldinstr "\n662:\n"                            \
+                     ".section .altinstructions,\"a\"\n"                       \
+                     "  .align 4\n"                                            \
+                     "  .long 661b\n"            /* label */                   \
+                     "  .long 663f\n"            /* new instruction */         \
+                     "  .byte %c0\n"             /* feature bit */             \
+                     "  .byte 662b-661b\n"       /* sourcelen */               \
+                     "  .byte 664f-663f\n"       /* replacementlen */          \
+                     ".previous\n"                                             \
+                     ".section .altinstr_replacement,\"ax\"\n"                 \
+                     "663:\n\t" newinstr "\n664:\n"   /* replacement */        \
+                     ".previous" :: "i" (feature), input)  
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+
+/* 
+ * Actually only lfence would be needed for mb() because all stores done 
+ * by the kernel should be already ordered. But keep a full barrier for now. 
+ */
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     b = 2;
+ *     memory_barrier();
+ *     p = &b;                         q = p;
+ *                                     read_barrier_depends();
+ *                                     d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     a = 2;
+ *     memory_barrier();
+ *     b = 3;                          y = b;
+ *                                     read_barrier_depends();
+ *                                     x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends() do { } while(0)
+
+#ifdef CONFIG_X86_OOSTORE
+/* Actually there are no OOO-store-capable CPUs that do SSE for now,
+   but make it a possibility already. */
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define wmb()  __asm__ __volatile__ ("": : :"memory")
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb()       mb()
+#define smp_rmb()      rmb()
+#define smp_wmb()      wmb()
+#define smp_read_barrier_depends()     read_barrier_depends()
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else
+#define smp_mb()       barrier()
+#define smp_rmb()      barrier()
+#define smp_wmb()      barrier()
+#define smp_read_barrier_depends()     do { } while(0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+/* interrupt control.. */
+
+/* 
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
+ */
+
+#define __cli()                                                               \
+do {                                                                          \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
+    barrier();                                                                \
+} while (0)
+
+#define __sti()                                                               \
+do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
+    barrier();                                                                \
+    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
+    barrier(); /* unmask then check (avoid races) */                          \
+    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
+        evtchn_do_upcall(NULL);                                               \
+} while (0)
+
+#define __save_flags(x)                                                       \
+do {                                                                          \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+} while (0)
+
+#define __restore_flags(x)                                                    \
+do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
+    barrier();                                                                \
+    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) {            \
+        barrier(); /* unmask then check (avoid races) */                      \
+        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
+            evtchn_do_upcall(NULL);                                           \
+    }                                                                         \
+} while (0)
+
+#define safe_halt()             ((void)0)
+
+#define __save_and_cli(x)                                                     \
+do {                                                                          \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
+    barrier();                                                                \
+} while (0)
+
+#define __save_and_sti(x)                                                     \
+do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
+    barrier();                                                                \
+    (x) = _shared->vcpu_data[0].evtchn_upcall_mask;                           \
+    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
+    barrier(); /* unmask then check (avoid races) */                          \
+    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
+        evtchn_do_upcall(NULL);                                               \
+} while (0)
+
+#define local_irq_save(x)      __save_and_cli(x)
+#define local_irq_restore(x)    __restore_flags(x)
+#define local_save_flags(x)     __save_flags(x)
+#define local_irq_disable()     __cli()
+#define local_irq_enable()      __sti()
+
+#define irqs_disabled()                        \
+       HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
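+
+/*
+ * Illustrative usage (a sketch, not part of the original header): the
+ * standard save/restore pattern; under Xen it toggles the event-channel
+ * upcall mask in the shared info page instead of the real EFLAGS.IF:
+ *
+ *     unsigned long flags;
+ *     local_irq_save(flags);
+ *     ... critical section ...
+ *     local_irq_restore(flags);
+ */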
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern unsigned long dmi_broken;
+extern int is_sony_vaio_laptop;
+extern int es7000_plat;
+
+#define BROKEN_ACPI_Sx         0x0001
+#define BROKEN_INIT_AFTER_S1   0x0002
+#define BROKEN_PNP_BIOS                0x0004
+#define BROKEN_CPUFREQ         0x0008
+
+#endif
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/timer.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/timer.h
new file mode 100644 (file)
index 0000000..add9f20
--- /dev/null
@@ -0,0 +1,58 @@
+#ifndef _ASMi386_TIMER_H
+#define _ASMi386_TIMER_H
+
+/**
+ * struct timer_opts - used to define a timer source
+ *
+ * @name: name of the timer.
+ * @init: Probes and initializes the timer. Takes clock= override 
+ *        string as an argument. Returns 0 on success, anything else
+ *        on failure.
+ * @mark_offset: called by the timer interrupt.
+ * @get_offset:  called by gettimeofday(). Returns the number of microseconds
+ *               since the last timer interrupt.
+ * @monotonic_clock: returns the number of nanoseconds since the init of the
+ *                   timer.
+ * @delay: delays this many clock cycles.
+ */
+struct timer_opts{
+       char* name;
+       int (*init)(char *override);
+       void (*mark_offset)(void);
+       unsigned long (*get_offset)(void);
+       unsigned long long (*monotonic_clock)(void);
+       void (*delay)(unsigned long);
+};
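+
+/*
+ * Illustrative sketch (not part of the original header; all names are
+ * hypothetical) of how a timer source fills in this table:
+ *
+ *     struct timer_opts timer_example = {
+ *             .name            = "example",
+ *             .init            = init_example,
+ *             .mark_offset     = mark_offset_example,
+ *             .get_offset      = get_offset_example,
+ *             .monotonic_clock = monotonic_clock_example,
+ *             .delay           = delay_example,
+ *     };
+ */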
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+extern struct timer_opts* select_timer(void);
+extern void clock_fallback(void);
+
+/* Modifiers for buggy PIT handling */
+
+extern int pit_latch_buggy;
+
+extern struct timer_opts *cur_timer;
+extern int timer_ack;
+
+/* list of externed timers */
+extern struct timer_opts timer_none;
+extern struct timer_opts timer_pit;
+extern struct timer_opts timer_tsc;
+#ifdef CONFIG_X86_CYCLONE_TIMER
+extern struct timer_opts timer_cyclone;
+#endif
+extern struct timer_opts timer_xen;
+
+extern unsigned long calibrate_tsc(void);
+extern void init_cpu_khz(void);
+#ifdef CONFIG_HPET_TIMER
+extern struct timer_opts timer_hpet;
+extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
+#endif
+
+#ifdef CONFIG_X86_PM_TIMER
+extern struct timer_opts timer_pmtmr;
+#endif
+#endif
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/tlbflush.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
new file mode 100644 (file)
index 0000000..2c06b03
--- /dev/null
@@ -0,0 +1,133 @@
+#ifndef _I386_TLBFLUSH_H
+#define _I386_TLBFLUSH_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+#define __flush_tlb() do {                                             \
+       queue_tlb_flush();                                              \
+       xen_flush_page_update_queue();                                  \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global()                                           \
+       do {                                                            \
+               unsigned int tmpreg;                                    \
+                                                                       \
+               __asm__ __volatile__(                                   \
+                       "movl %1, %%cr4;  # turn off PGE     \n"        \
+                       "movl %%cr3, %0;                     \n"        \
+                       "movl %0, %%cr3;  # flush TLB        \n"        \
+                       "movl %2, %%cr4;  # turn PGE back on \n"        \
+                       : "=&r" (tmpreg)                                \
+                       : "r" (mmu_cr4_features & ~X86_CR4_PGE),        \
+                         "r" (mmu_cr4_features)                        \
+                       : "memory");                                    \
+       } while (0)
+
+extern unsigned long pgkern_mask;
+
+# define __flush_tlb_all()                                             \
+       do {                                                            \
+               if (cpu_has_pge)                                        \
+                       __flush_tlb_global();                           \
+               else                                                    \
+                       __flush_tlb();                                  \
+       } while (0)
+
+#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
+
+#define __flush_tlb_single(addr) do {                                  \
+       queue_invlpg(addr);                                             \
+       xen_flush_page_update_queue();                                  \
+} while (/* CONSTCOND */0)
+
+# define __flush_tlb_one(addr) __flush_tlb_single(addr)
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb() flushes the current mm struct's TLB entries
+ *  - flush_tlb_all() flushes all processes' TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ...but the i386 has somewhat limited TLB flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb_all()
+#define local_flush_tlb() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+       if (mm == current->active_mm)
+               __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+       unsigned long addr)
+{
+       if (vma->vm_mm == current->active_mm)
+               __flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end)
+{
+       if (vma->vm_mm == current->active_mm)
+               __flush_tlb();
+}
+
+#else
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() \
+       __flush_tlb()
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb()    flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+       flush_tlb_mm(vma->vm_mm);
+}
+
+#define TLBSTATE_OK    1
+#define TLBSTATE_LAZY  2
+
+struct tlb_state
+{
+       struct mm_struct *active_mm;
+       int state;
+       char __cacheline_padding[L1_CACHE_BYTES-8];
+};
+extern struct tlb_state cpu_tlbstate[NR_CPUS];
+
+
+#endif
+
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+                                     unsigned long start, unsigned long end)
+{
+       /* i386 does not keep any page table caches in TLB */
+}
+
+#endif /* _I386_TLBFLUSH_H */
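The distinctive part of this Xen variant is that the low-level flushes are
queued rather than issued directly: queue_tlb_flush()/queue_invlpg() append a
request to the page-update queue, and xen_flush_page_update_queue() pushes the
queue to the hypervisor. A hedged usage sketch (update_kernel_pte() is a
hypothetical helper, not a function from this tree):

    /* Rewrite one kernel PTE, then invalidate the stale translation.
     * __flush_tlb_one() expands to queue_invlpg() followed by
     * xen_flush_page_update_queue(), so the INVLPG reaches Xen together
     * with any queued page-table updates. */
    static void remap_kernel_page(unsigned long vaddr, pte_t new_pte)
    {
        update_kernel_pte(vaddr, new_pte);  /* hypothetical PTE writer */
        __flush_tlb_one(vaddr);
    }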
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/xor.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/xor.h
new file mode 100644 (file)
index 0000000..fe2608c
--- /dev/null
@@ -0,0 +1,884 @@
+/*
+ * include/asm-i386/xor.h
+ *
+ * Optimized RAID-5 checksumming functions for MMX and SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * High-speed RAID5 checksumming functions utilizing MMX instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+#define LD(x,y)                "       movq   8*("#x")(%1), %%mm"#y"   ;\n"
+#define ST(x,y)                "       movq %%mm"#y",   8*("#x")(%1)   ;\n"
+#define XO1(x,y)       "       pxor   8*("#x")(%2), %%mm"#y"   ;\n"
+#define XO2(x,y)       "       pxor   8*("#x")(%3), %%mm"#y"   ;\n"
+#define XO3(x,y)       "       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
+#define XO4(x,y)       "       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
+
+#include <asm/i387.h>
+
+static void
+xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       unsigned long lines = bytes >> 7;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+       LD(i,0)                                 \
+               LD(i+1,1)                       \
+                       LD(i+2,2)               \
+                               LD(i+3,3)       \
+       XO1(i,0)                                \
+       ST(i,0)                                 \
+               XO1(i+1,1)                      \
+               ST(i+1,1)                       \
+                       XO1(i+2,2)              \
+                       ST(i+2,2)               \
+                               XO1(i+3,3)      \
+                               ST(i+3,3)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+       BLOCK(0)
+       BLOCK(4)
+       BLOCK(8)
+       BLOCK(12)
+
+       "       addl $128, %1         ;\n"
+       "       addl $128, %2         ;\n"
+       "       decl %0               ;\n"
+       "       jnz 1b                ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+             unsigned long *p3)
+{
+       unsigned long lines = bytes >> 7;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+       LD(i,0)                                 \
+               LD(i+1,1)                       \
+                       LD(i+2,2)               \
+                               LD(i+3,3)       \
+       XO1(i,0)                                \
+               XO1(i+1,1)                      \
+                       XO1(i+2,2)              \
+                               XO1(i+3,3)      \
+       XO2(i,0)                                \
+       ST(i,0)                                 \
+               XO2(i+1,1)                      \
+               ST(i+1,1)                       \
+                       XO2(i+2,2)              \
+                       ST(i+2,2)               \
+                               XO2(i+3,3)      \
+                               ST(i+3,3)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+       BLOCK(0)
+       BLOCK(4)
+       BLOCK(8)
+       BLOCK(12)
+
+       "       addl $128, %1         ;\n"
+       "       addl $128, %2         ;\n"
+       "       addl $128, %3         ;\n"
+       "       decl %0               ;\n"
+       "       jnz 1b                ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+             unsigned long *p3, unsigned long *p4)
+{
+       unsigned long lines = bytes >> 7;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+       LD(i,0)                                 \
+               LD(i+1,1)                       \
+                       LD(i+2,2)               \
+                               LD(i+3,3)       \
+       XO1(i,0)                                \
+               XO1(i+1,1)                      \
+                       XO1(i+2,2)              \
+                               XO1(i+3,3)      \
+       XO2(i,0)                                \
+               XO2(i+1,1)                      \
+                       XO2(i+2,2)              \
+                               XO2(i+3,3)      \
+       XO3(i,0)                                \
+       ST(i,0)                                 \
+               XO3(i+1,1)                      \
+               ST(i+1,1)                       \
+                       XO3(i+2,2)              \
+                       ST(i+2,2)               \
+                               XO3(i+3,3)      \
+                               ST(i+3,3)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+       BLOCK(0)
+       BLOCK(4)
+       BLOCK(8)
+       BLOCK(12)
+
+       "       addl $128, %1         ;\n"
+       "       addl $128, %2         ;\n"
+       "       addl $128, %3         ;\n"
+       "       addl $128, %4         ;\n"
+       "       decl %0               ;\n"
+       "       jnz 1b                ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+
+static void
+xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+             unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+       unsigned long lines = bytes >> 7;
+
+       kernel_fpu_begin();
+
+       /* Make sure GCC forgets anything it knows about p4 or p5,
+          such that it won't pass to the asm volatile below a
+          register that is shared with any other variable.  That's
+          because we modify p4 and p5 there, but we can't mark them
+          as read/write, otherwise we'd overflow the 10-asm-operands
+          limit of GCC < 3.1.  */
+       __asm__ ("" : "+r" (p4), "+r" (p5));
+
+       __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+       LD(i,0)                                 \
+               LD(i+1,1)                       \
+                       LD(i+2,2)               \
+                               LD(i+3,3)       \
+       XO1(i,0)                                \
+               XO1(i+1,1)                      \
+                       XO1(i+2,2)              \
+                               XO1(i+3,3)      \
+       XO2(i,0)                                \
+               XO2(i+1,1)                      \
+                       XO2(i+2,2)              \
+                               XO2(i+3,3)      \
+       XO3(i,0)                                \
+               XO3(i+1,1)                      \
+                       XO3(i+2,2)              \
+                               XO3(i+3,3)      \
+       XO4(i,0)                                \
+       ST(i,0)                                 \
+               XO4(i+1,1)                      \
+               ST(i+1,1)                       \
+                       XO4(i+2,2)              \
+                       ST(i+2,2)               \
+                               XO4(i+3,3)      \
+                               ST(i+3,3)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+       BLOCK(0)
+       BLOCK(4)
+       BLOCK(8)
+       BLOCK(12)
+
+       "       addl $128, %1         ;\n"
+       "       addl $128, %2         ;\n"
+       "       addl $128, %3         ;\n"
+       "       addl $128, %4         ;\n"
+       "       addl $128, %5         ;\n"
+       "       decl %0               ;\n"
+       "       jnz 1b                ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       : "r" (p4), "r" (p5) 
+       : "memory");
+
+       /* p4 and p5 were modified, and now the variables are dead.
+          Clobber them just to be sure nobody does something stupid
+          like assuming they have some legal value.  */
+       __asm__ ("" : "=r" (p4), "=r" (p5));
+
+       kernel_fpu_end();
+}
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef BLOCK
+
+static void
+xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       unsigned long lines = bytes >> 6;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+       " .align 32                  ;\n"
+       " 1:                         ;\n"
+       "       movq   (%1), %%mm0   ;\n"
+       "       movq  8(%1), %%mm1   ;\n"
+       "       pxor   (%2), %%mm0   ;\n"
+       "       movq 16(%1), %%mm2   ;\n"
+       "       movq %%mm0,   (%1)   ;\n"
+       "       pxor  8(%2), %%mm1   ;\n"
+       "       movq 24(%1), %%mm3   ;\n"
+       "       movq %%mm1,  8(%1)   ;\n"
+       "       pxor 16(%2), %%mm2   ;\n"
+       "       movq 32(%1), %%mm4   ;\n"
+       "       movq %%mm2, 16(%1)   ;\n"
+       "       pxor 24(%2), %%mm3   ;\n"
+       "       movq 40(%1), %%mm5   ;\n"
+       "       movq %%mm3, 24(%1)   ;\n"
+       "       pxor 32(%2), %%mm4   ;\n"
+       "       movq 48(%1), %%mm6   ;\n"
+       "       movq %%mm4, 32(%1)   ;\n"
+       "       pxor 40(%2), %%mm5   ;\n"
+       "       movq 56(%1), %%mm7   ;\n"
+       "       movq %%mm5, 40(%1)   ;\n"
+       "       pxor 48(%2), %%mm6   ;\n"
+       "       pxor 56(%2), %%mm7   ;\n"
+       "       movq %%mm6, 48(%1)   ;\n"
+       "       movq %%mm7, 56(%1)   ;\n"
+       
+       "       addl $64, %1         ;\n"
+       "       addl $64, %2         ;\n"
+       "       decl %0              ;\n"
+       "       jnz 1b               ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+            unsigned long *p3)
+{
+       unsigned long lines = bytes >> 6;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+       " .align 32,0x90             ;\n"
+       " 1:                         ;\n"
+       "       movq   (%1), %%mm0   ;\n"
+       "       movq  8(%1), %%mm1   ;\n"
+       "       pxor   (%2), %%mm0   ;\n"
+       "       movq 16(%1), %%mm2   ;\n"
+       "       pxor  8(%2), %%mm1   ;\n"
+       "       pxor   (%3), %%mm0   ;\n"
+       "       pxor 16(%2), %%mm2   ;\n"
+       "       movq %%mm0,   (%1)   ;\n"
+       "       pxor  8(%3), %%mm1   ;\n"
+       "       pxor 16(%3), %%mm2   ;\n"
+       "       movq 24(%1), %%mm3   ;\n"
+       "       movq %%mm1,  8(%1)   ;\n"
+       "       movq 32(%1), %%mm4   ;\n"
+       "       movq 40(%1), %%mm5   ;\n"
+       "       pxor 24(%2), %%mm3   ;\n"
+       "       movq %%mm2, 16(%1)   ;\n"
+       "       pxor 32(%2), %%mm4   ;\n"
+       "       pxor 24(%3), %%mm3   ;\n"
+       "       pxor 40(%2), %%mm5   ;\n"
+       "       movq %%mm3, 24(%1)   ;\n"
+       "       pxor 32(%3), %%mm4   ;\n"
+       "       pxor 40(%3), %%mm5   ;\n"
+       "       movq 48(%1), %%mm6   ;\n"
+       "       movq %%mm4, 32(%1)   ;\n"
+       "       movq 56(%1), %%mm7   ;\n"
+       "       pxor 48(%2), %%mm6   ;\n"
+       "       movq %%mm5, 40(%1)   ;\n"
+       "       pxor 56(%2), %%mm7   ;\n"
+       "       pxor 48(%3), %%mm6   ;\n"
+       "       pxor 56(%3), %%mm7   ;\n"
+       "       movq %%mm6, 48(%1)   ;\n"
+       "       movq %%mm7, 56(%1)   ;\n"
+      
+       "       addl $64, %1         ;\n"
+       "       addl $64, %2         ;\n"
+       "       addl $64, %3         ;\n"
+       "       decl %0              ;\n"
+       "       jnz 1b               ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       :
+       : "memory" );
+
+       kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+            unsigned long *p3, unsigned long *p4)
+{
+       unsigned long lines = bytes >> 6;
+
+       kernel_fpu_begin();
+
+       __asm__ __volatile__ (
+       " .align 32,0x90             ;\n"
+       " 1:                         ;\n"
+       "       movq   (%1), %%mm0   ;\n"
+       "       movq  8(%1), %%mm1   ;\n"
+       "       pxor   (%2), %%mm0   ;\n"
+       "       movq 16(%1), %%mm2   ;\n"
+       "       pxor  8(%2), %%mm1   ;\n"
+       "       pxor   (%3), %%mm0   ;\n"
+       "       pxor 16(%2), %%mm2   ;\n"
+       "       pxor  8(%3), %%mm1   ;\n"
+       "       pxor   (%4), %%mm0   ;\n"
+       "       movq 24(%1), %%mm3   ;\n"
+       "       pxor 16(%3), %%mm2   ;\n"
+       "       pxor  8(%4), %%mm1   ;\n"
+       "       movq %%mm0,   (%1)   ;\n"
+       "       movq 32(%1), %%mm4   ;\n"
+       "       pxor 24(%2), %%mm3   ;\n"
+       "       pxor 16(%4), %%mm2   ;\n"
+       "       movq %%mm1,  8(%1)   ;\n"
+       "       movq 40(%1), %%mm5   ;\n"
+       "       pxor 32(%2), %%mm4   ;\n"
+       "       pxor 24(%3), %%mm3   ;\n"
+       "       movq %%mm2, 16(%1)   ;\n"
+       "       pxor 40(%2), %%mm5   ;\n"
+       "       pxor 32(%3), %%mm4   ;\n"
+       "       pxor 24(%4), %%mm3   ;\n"
+       "       movq %%mm3, 24(%1)   ;\n"
+       "       movq 56(%1), %%mm7   ;\n"
+       "       movq 48(%1), %%mm6   ;\n"
+       "       pxor 40(%3), %%mm5   ;\n"
+       "       pxor 32(%4), %%mm4   ;\n"
+       "       pxor 48(%2), %%mm6   ;\n"
+       "       movq %%mm4, 32(%1)   ;\n"
+       "       pxor 56(%2), %%mm7   ;\n"
+       "       pxor 40(%4), %%mm5   ;\n"
+       "       pxor 48(%3), %%mm6   ;\n"
+       "       pxor 56(%3), %%mm7   ;\n"
+       "       movq %%mm5, 40(%1)   ;\n"
+       "       pxor 48(%4), %%mm6   ;\n"
+       "       pxor 56(%4), %%mm7   ;\n"
+       "       movq %%mm6, 48(%1)   ;\n"
+       "       movq %%mm7, 56(%1)   ;\n"
+      
+       "       addl $64, %1         ;\n"
+       "       addl $64, %2         ;\n"
+       "       addl $64, %3         ;\n"
+       "       addl $64, %4         ;\n"
+       "       decl %0              ;\n"
+       "       jnz 1b               ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+       :
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+            unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+       unsigned long lines = bytes >> 6;
+
+       kernel_fpu_begin();
+
+       /* Make sure GCC forgets anything it knows about p4 or p5,
+          such that it won't pass to the asm volatile below a
+          register that is shared with any other variable.  That's
+          because we modify p4 and p5 there, but we can't mark them
+          as read/write, otherwise we'd overflow the 10-asm-operands
+          limit of GCC < 3.1.  */
+       __asm__ ("" : "+r" (p4), "+r" (p5));
+
+       __asm__ __volatile__ (
+       " .align 32,0x90             ;\n"
+       " 1:                         ;\n"
+       "       movq   (%1), %%mm0   ;\n"
+       "       movq  8(%1), %%mm1   ;\n"
+       "       pxor   (%2), %%mm0   ;\n"
+       "       pxor  8(%2), %%mm1   ;\n"
+       "       movq 16(%1), %%mm2   ;\n"
+       "       pxor   (%3), %%mm0   ;\n"
+       "       pxor  8(%3), %%mm1   ;\n"
+       "       pxor 16(%2), %%mm2   ;\n"
+       "       pxor   (%4), %%mm0   ;\n"
+       "       pxor  8(%4), %%mm1   ;\n"
+       "       pxor 16(%3), %%mm2   ;\n"
+       "       movq 24(%1), %%mm3   ;\n"
+       "       pxor   (%5), %%mm0   ;\n"
+       "       pxor  8(%5), %%mm1   ;\n"
+       "       movq %%mm0,   (%1)   ;\n"
+       "       pxor 16(%4), %%mm2   ;\n"
+       "       pxor 24(%2), %%mm3   ;\n"
+       "       movq %%mm1,  8(%1)   ;\n"
+       "       pxor 16(%5), %%mm2   ;\n"
+       "       pxor 24(%3), %%mm3   ;\n"
+       "       movq 32(%1), %%mm4   ;\n"
+       "       movq %%mm2, 16(%1)   ;\n"
+       "       pxor 24(%4), %%mm3   ;\n"
+       "       pxor 32(%2), %%mm4   ;\n"
+       "       movq 40(%1), %%mm5   ;\n"
+       "       pxor 24(%5), %%mm3   ;\n"
+       "       pxor 32(%3), %%mm4   ;\n"
+       "       pxor 40(%2), %%mm5   ;\n"
+       "       movq %%mm3, 24(%1)   ;\n"
+       "       pxor 32(%4), %%mm4   ;\n"
+       "       pxor 40(%3), %%mm5   ;\n"
+       "       movq 48(%1), %%mm6   ;\n"
+       "       movq 56(%1), %%mm7   ;\n"
+       "       pxor 32(%5), %%mm4   ;\n"
+       "       pxor 40(%4), %%mm5   ;\n"
+       "       pxor 48(%2), %%mm6   ;\n"
+       "       pxor 56(%2), %%mm7   ;\n"
+       "       movq %%mm4, 32(%1)   ;\n"
+       "       pxor 48(%3), %%mm6   ;\n"
+       "       pxor 56(%3), %%mm7   ;\n"
+       "       pxor 40(%5), %%mm5   ;\n"
+       "       pxor 48(%4), %%mm6   ;\n"
+       "       pxor 56(%4), %%mm7   ;\n"
+       "       movq %%mm5, 40(%1)   ;\n"
+       "       pxor 48(%5), %%mm6   ;\n"
+       "       pxor 56(%5), %%mm7   ;\n"
+       "       movq %%mm6, 48(%1)   ;\n"
+       "       movq %%mm7, 56(%1)   ;\n"
+      
+       "       addl $64, %1         ;\n"
+       "       addl $64, %2         ;\n"
+       "       addl $64, %3         ;\n"
+       "       addl $64, %4         ;\n"
+       "       addl $64, %5         ;\n"
+       "       decl %0              ;\n"
+       "       jnz 1b               ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       : "r" (p4), "r" (p5)
+       : "memory");
+
+       /* p4 and p5 were modified, and now the variables are dead.
+          Clobber them just to be sure nobody does something stupid
+          like assuming they have some legal value.  */
+       __asm__ ("" : "=r" (p4), "=r" (p5));
+
+       kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_pII_mmx = {
+       .name = "pII_mmx",
+       .do_2 = xor_pII_mmx_2,
+       .do_3 = xor_pII_mmx_3,
+       .do_4 = xor_pII_mmx_4,
+       .do_5 = xor_pII_mmx_5,
+};
+
+static struct xor_block_template xor_block_p5_mmx = {
+       .name = "p5_mmx",
+       .do_2 = xor_p5_mmx_2,
+       .do_3 = xor_p5_mmx_3,
+       .do_4 = xor_p5_mmx_4,
+       .do_5 = xor_p5_mmx_5,
+};
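+
+/*
+ * For reference, every do_2 routine above computes the same result as this
+ * portable C loop: p1[i] ^= p2[i] across the buffer. The MMX variants just
+ * process 64 (p5) or 128 (pII) bytes per iteration with explicit register
+ * scheduling. This scalar sketch is illustrative and not part of this tree.
+ */
+static inline void
+xor_2_reference(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       unsigned long i, words = bytes / sizeof(unsigned long);
+
+       for (i = 0; i < words; i++)
+               p1[i] ^= p2[i];         /* destination doubles as first source */
+}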
+
+/*
+ * Cache-avoiding checksumming functions utilizing KNI (SSE) instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+#define XMMS_SAVE do {                         \
+       preempt_disable();                      \
+       if (!(current->flags & PF_USEDFPU))     \
+               clts();                         \
+       __asm__ __volatile__ (                  \
+               "movups %%xmm0,(%1)     ;\n\t"  \
+               "movups %%xmm1,0x10(%1) ;\n\t"  \
+               "movups %%xmm2,0x20(%1) ;\n\t"  \
+               "movups %%xmm3,0x30(%1) ;\n\t"  \
+               : "=&r" (cr0)                   \
+               : "r" (xmm_save)                \
+               : "memory");                    \
+} while(0)
+
+#define XMMS_RESTORE do {                      \
+       __asm__ __volatile__ (                  \
+               "sfence                 ;\n\t"  \
+               "movups (%1),%%xmm0     ;\n\t"  \
+               "movups 0x10(%1),%%xmm1 ;\n\t"  \
+               "movups 0x20(%1),%%xmm2 ;\n\t"  \
+               "movups 0x30(%1),%%xmm3 ;\n\t"  \
+               :                               \
+               : "r" (cr0), "r" (xmm_save)     \
+               : "memory");                    \
+       if (!(current->flags & PF_USEDFPU))     \
+               stts();                         \
+       preempt_enable();                       \
+} while(0)
+
+#define ALIGN16 __attribute__((aligned(16)))
+
+#define OFFS(x)                "16*("#x")"
+#define PF_OFFS(x)     "256+16*("#x")"
+#define        PF0(x)          "       prefetchnta "PF_OFFS(x)"(%1)            ;\n"
+#define LD(x,y)                "       movaps   "OFFS(x)"(%1), %%xmm"#y"       ;\n"
+#define ST(x,y)                "       movaps %%xmm"#y",   "OFFS(x)"(%1)       ;\n"
+#define PF1(x)         "       prefetchnta "PF_OFFS(x)"(%2)            ;\n"
+#define PF2(x)         "       prefetchnta "PF_OFFS(x)"(%3)            ;\n"
+#define PF3(x)         "       prefetchnta "PF_OFFS(x)"(%4)            ;\n"
+#define PF4(x)         "       prefetchnta "PF_OFFS(x)"(%5)            ;\n"
+#define PF5(x)         "       prefetchnta "PF_OFFS(x)"(%6)            ;\n"
+#define XO1(x,y)       "       xorps   "OFFS(x)"(%2), %%xmm"#y"        ;\n"
+#define XO2(x,y)       "       xorps   "OFFS(x)"(%3), %%xmm"#y"        ;\n"
+#define XO3(x,y)       "       xorps   "OFFS(x)"(%4), %%xmm"#y"        ;\n"
+#define XO4(x,y)       "       xorps   "OFFS(x)"(%5), %%xmm"#y"        ;\n"
+#define XO5(x,y)       "       xorps   "OFFS(x)"(%6), %%xmm"#y"        ;\n"
+
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+        unsigned long lines = bytes >> 8;
+       char xmm_save[16*4] ALIGN16;
+       int cr0;
+
+       XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+               LD(i,0)                                 \
+                       LD(i+1,1)                       \
+               PF1(i)                                  \
+                               PF1(i+2)                \
+                               LD(i+2,2)               \
+                                       LD(i+3,3)       \
+               PF0(i+4)                                \
+                               PF0(i+6)                \
+               XO1(i,0)                                \
+                       XO1(i+1,1)                      \
+                               XO1(i+2,2)              \
+                                       XO1(i+3,3)      \
+               ST(i,0)                                 \
+                       ST(i+1,1)                       \
+                               ST(i+2,2)               \
+                                       ST(i+3,3)       \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+        " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2)
+       :
+        : "memory");
+
+       XMMS_RESTORE;
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3)
+{
+        unsigned long lines = bytes >> 8;
+       char xmm_save[16*4] ALIGN16;
+       int cr0;
+
+       XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i+2)                \
+               LD(i,0)                                 \
+                       LD(i+1,1)                       \
+                               LD(i+2,2)               \
+                                       LD(i+3,3)       \
+               PF2(i)                                  \
+                               PF2(i+2)                \
+               PF0(i+4)                                \
+                               PF0(i+6)                \
+               XO1(i,0)                                \
+                       XO1(i+1,1)                      \
+                               XO1(i+2,2)              \
+                                       XO1(i+3,3)      \
+               XO2(i,0)                                \
+                       XO2(i+1,1)                      \
+                               XO2(i+2,2)              \
+                                       XO2(i+3,3)      \
+               ST(i,0)                                 \
+                       ST(i+1,1)                       \
+                               ST(i+2,2)               \
+                                       ST(i+3,3)       \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+        " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r"(p2), "+r"(p3)
+       :
+        : "memory" );
+
+       XMMS_RESTORE;
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3, unsigned long *p4)
+{
+        unsigned long lines = bytes >> 8;
+       char xmm_save[16*4] ALIGN16;
+       int cr0;
+
+       XMMS_SAVE;
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i+2)                \
+               LD(i,0)                                 \
+                       LD(i+1,1)                       \
+                               LD(i+2,2)               \
+                                       LD(i+3,3)       \
+               PF2(i)                                  \
+                               PF2(i+2)                \
+               XO1(i,0)                                \
+                       XO1(i+1,1)                      \
+                               XO1(i+2,2)              \
+                                       XO1(i+3,3)      \
+               PF3(i)                                  \
+                               PF3(i+2)                \
+               PF0(i+4)                                \
+                               PF0(i+6)                \
+               XO2(i,0)                                \
+                       XO2(i+1,1)                      \
+                               XO2(i+2,2)              \
+                                       XO2(i+3,3)      \
+               XO3(i,0)                                \
+                       XO3(i+1,1)                      \
+                               XO3(i+2,2)              \
+                                       XO3(i+3,3)      \
+               ST(i,0)                                 \
+                       ST(i+1,1)                       \
+                               ST(i+2,2)               \
+                                       ST(i+3,3)       \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+        " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       addl $256, %4           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+       :
+        : "memory" );
+
+       XMMS_RESTORE;
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+        unsigned long lines = bytes >> 8;
+       char xmm_save[16*4] ALIGN16;
+       int cr0;
+
+       XMMS_SAVE;
+
+       /* Make sure GCC forgets anything it knows about p4 or p5,
+          such that it won't pass to the asm volatile below a
+          register that is shared with any other variable.  That's
+          because we modify p4 and p5 there, but we can't mark them
+          as read/write, otherwise we'd overflow the 10-asm-operands
+          limit of GCC < 3.1.  */
+       __asm__ ("" : "+r" (p4), "+r" (p5));
+
+        __asm__ __volatile__ (
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i+2)                \
+               LD(i,0)                                 \
+                       LD(i+1,1)                       \
+                               LD(i+2,2)               \
+                                       LD(i+3,3)       \
+               PF2(i)                                  \
+                               PF2(i+2)                \
+               XO1(i,0)                                \
+                       XO1(i+1,1)                      \
+                               XO1(i+2,2)              \
+                                       XO1(i+3,3)      \
+               PF3(i)                                  \
+                               PF3(i+2)                \
+               XO2(i,0)                                \
+                       XO2(i+1,1)                      \
+                               XO2(i+2,2)              \
+                                       XO2(i+3,3)      \
+               PF4(i)                                  \
+                               PF4(i+2)                \
+               PF0(i+4)                                \
+                               PF0(i+6)                \
+               XO3(i,0)                                \
+                       XO3(i+1,1)                      \
+                               XO3(i+2,2)              \
+                                       XO3(i+3,3)      \
+               XO4(i,0)                                \
+                       XO4(i+1,1)                      \
+                               XO4(i+2,2)              \
+                                       XO4(i+3,3)      \
+               ST(i,0)                                 \
+                       ST(i+1,1)                       \
+                               ST(i+2,2)               \
+                                       ST(i+3,3)       \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+        " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+        "       addl $256, %1           ;\n"
+        "       addl $256, %2           ;\n"
+        "       addl $256, %3           ;\n"
+        "       addl $256, %4           ;\n"
+        "       addl $256, %5           ;\n"
+        "       decl %0                 ;\n"
+        "       jnz 1b                  ;\n"
+       : "+r" (lines),
+         "+r" (p1), "+r" (p2), "+r" (p3)
+       : "r" (p4), "r" (p5)
+       : "memory");
+
+       /* p4 and p5 were modified, and now the variables are dead.
+          Clobber them just to be sure nobody does something stupid
+          like assuming they have some legal value.  */
+       __asm__ ("" : "=r" (p4), "=r" (p5));
+
+       XMMS_RESTORE;
+}
+
+static struct xor_block_template xor_block_pIII_sse = {
+        .name = "pIII_sse",
+        .do_2 =  xor_sse_2,
+        .do_3 =  xor_sse_3,
+        .do_4 =  xor_sse_4,
+        .do_5 = xor_sse_5,
+};
+
+/* Also try the generic routines.  */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES                              \
+       do {                                            \
+               xor_speed(&xor_block_8regs);            \
+               xor_speed(&xor_block_8regs_p);          \
+               xor_speed(&xor_block_32regs);           \
+               xor_speed(&xor_block_32regs_p);         \
+               if (cpu_has_xmm)                        \
+                       xor_speed(&xor_block_pIII_sse); \
+               if (cpu_has_mmx) {                      \
+                       xor_speed(&xor_block_pII_mmx);  \
+                       xor_speed(&xor_block_p5_mmx);   \
+               }                                       \
+       } while (0)
+
+/* We force the use of the SSE xor block because it can write around the L2
+   cache. We may also be able to load into the L1 cache only, depending on
+   how the CPU deals with a load to a line that is being prefetched.  */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+       (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
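At boot, XOR_TRY_TEMPLATES benchmarks each candidate with xor_speed() and the
RAID code then calls through the winning template. A hedged caller sketch
(xor_demo() and its buffers are hypothetical; the SSE movaps path requires
16-byte alignment, and the unrolled loop consumes 256 bytes per iteration, so
sizes should be multiples of that):

    static char a[512] ALIGN16, b[512] ALIGN16, c[512] ALIGN16;

    static void xor_demo(void)
    {
        /* computes a[i] ^= b[i] ^ c[i] over the whole 512 bytes */
        xor_block_pIII_sse.do_3(sizeof(a), (unsigned long *)a,
                                (unsigned long *)b, (unsigned long *)c);
    }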
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/blkif.h b/linux-2.6.7-xen-sparse/include/asm-xen/blkif.h
new file mode 100644 (file)
index 0000000..1024629
--- /dev/null
@@ -0,0 +1,115 @@
+/******************************************************************************
+ * blkif.h
+ * 
+ * Unified block-device I/O interface for Xen guest OSes.
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __SHARED_BLKIF_H__
+#define __SHARED_BLKIF_H__
+
+#define blkif_vdev_t   u16
+#define blkif_sector_t u64
+
+#define BLKIF_OP_READ      0
+#define BLKIF_OP_WRITE     1
+#define BLKIF_OP_PROBE     2
+
+/* NB. Ring size must be small enough for sizeof(blkif_ring_t) <= PAGE_SIZE. */
+#define BLKIF_RING_SIZE        64
+
+/*
+ * Maximum scatter/gather segments per request.
+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
+ */
+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
+
+typedef struct {
+    u8             operation;    /*  0: BLKIF_OP_???                         */
+    u8             nr_segments;  /*  1: number of segments                   */
+    blkif_vdev_t   device;       /*  2: only for read/write requests         */
+    unsigned long  id;           /*  4: private guest value, echoed in resp  */
+    blkif_sector_t sector_number;    /* start sector idx on disk (r/w only)  */
+    /* @f_a_s[2:0]=last_sect ; @f_a_s[5:3]=first_sect ; @f_a_s[:12]=frame.   */
+    /* @first_sect: first sector in frame to transfer (inclusive).           */
+    /* @last_sect: last sector in frame to transfer (inclusive).             */
+    /* @frame: machine page frame number.                                    */
+    unsigned long  frame_and_sects[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+} PACKED blkif_request_t;
+
+#define blkif_first_sect(_fas) (((_fas)>>3)&7)
+#define blkif_last_sect(_fas)  ((_fas)&7)
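+
+/*
+ * Illustrative only (not part of this interface): packing one segment entry
+ * according to the @f_a_s layout documented above -- frame in bits 12 and
+ * up, first_sect in bits 5:3, last_sect in bits 2:0.
+ */
+static inline unsigned long
+blkif_mk_fas(unsigned long frame, unsigned int first_sect, unsigned int last_sect)
+{
+    return (frame << 12) | ((unsigned long)(first_sect & 7) << 3) |
+           (last_sect & 7);
+}
+/* blkif_first_sect(blkif_mk_fas(f, 0, 7)) == 0; blkif_last_sect(..) == 7. */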
+
+typedef struct {
+    unsigned long   id;              /* copied from request */
+    u8              operation;       /* copied from request */
+    s16             status;          /* BLKIF_RSP_???       */
+} PACKED blkif_response_t;
+
+#define BLKIF_RSP_ERROR  -1 /* non-specific 'error' */
+#define BLKIF_RSP_OKAY    0 /* non-specific 'okay'  */
+
+/*
+ * We use a special capitalised type name because it is _essential_ that all 
+ * arithmetic on indexes is done on an integer type of the correct size.
+ */
+typedef u32 BLKIF_RING_IDX;
+
+/*
+ * Ring indexes are 'free running'. That is, they are not stored modulo the
+ * size of the ring buffer. The following macro converts a free-running counter
+ * into a value that can directly index a ring-buffer array.
+ */
+#define MASK_BLKIF_IDX(_i) ((_i)&(BLKIF_RING_SIZE-1))
+
+typedef struct {
+    BLKIF_RING_IDX req_prod;  /*  0: Request producer. Updated by front-end. */
+    BLKIF_RING_IDX resp_prod; /*  4: Response producer. Updated by back-end. */
+    union {                   /*  8 */
+        blkif_request_t  req;
+        blkif_response_t resp;
+    } PACKED ring[BLKIF_RING_SIZE];
+} PACKED blkif_ring_t;
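+
+/*
+ * A sketch of the front-end producer convention implied by the free-running
+ * indexes (illustrative, not code from this tree; wmb() is the kernel's
+ * write barrier). req_prod advances without wrapping and is masked only when
+ * indexing. Real code must also bound req_prod - resp_prod by
+ * BLKIF_RING_SIZE before writing.
+ */
+static inline void blkif_queue_req(blkif_ring_t *ring, blkif_request_t *req)
+{
+    ring->ring[MASK_BLKIF_IDX(ring->req_prod)].req = *req;
+    wmb();               /* publish the payload before the index */
+    ring->req_prod++;    /* free-running; never masked when stored */
+}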
+
+
+/*
+ * BLKIF_OP_PROBE:
+ * The request format for a probe request is constrained as follows:
+ *  @operation   == BLKIF_OP_PROBE
+ *  @nr_segments == size of probe buffer in pages
+ *  @device      == unused (zero)
+ *  @id          == any value (echoed in response message)
+ *  @sector_number == unused (zero)
+ *  @frame_and_sects == list of page-sized buffers.
+ *                       (i.e., @first_sect == 0, @last_sect == 7).
+ * 
+ * The response is a list of vdisk_t elements copied into the out-of-band
+ * probe buffer. On success the response status field contains the number
+ * of vdisk_t elements.
+ */
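+
+/*
+ * Hedged sketch of building a probe request per the rules above; buf_frame
+ * (the machine frame of a one-page probe buffer) and the id value are
+ * hypothetical.
+ */
+static inline void blkif_build_probe(blkif_request_t *req, unsigned long buf_frame)
+{
+    req->operation     = BLKIF_OP_PROBE;
+    req->nr_segments   = 1;              /* one page of probe buffer */
+    req->device        = 0;              /* unused */
+    req->id            = 0x1234;         /* echoed back in the response */
+    req->sector_number = 0;              /* unused */
+    /* whole frame: first_sect == 0, last_sect == 7 */
+    req->frame_and_sects[0] = (buf_frame << 12) | (0 << 3) | 7;
+}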
+
+/* XXX SMH: Type values below are chosen to match ide_xxx in Linux ide.h. */
+#define VDISK_TYPE_FLOPPY  0x00
+#define VDISK_TYPE_TAPE    0x01
+#define VDISK_TYPE_CDROM   0x05
+#define VDISK_TYPE_OPTICAL 0x07
+#define VDISK_TYPE_DISK    0x20 
+
+#define VDISK_TYPE_MASK    0x3F
+#define VDISK_TYPE(_x)     ((_x) & VDISK_TYPE_MASK) 
+
+/* The top two bits of the type field encode various flags. */
+#define VDISK_FLAG_RO      0x40
+#define VDISK_FLAG_VIRT    0x80
+#define VDISK_READONLY(_x) ((_x) & VDISK_FLAG_RO)
+#define VDISK_VIRTUAL(_x)  ((_x) & VDISK_FLAG_VIRT) 
+
+typedef struct {
+    blkif_sector_t capacity;     /*  0: Size in terms of 512-byte sectors.   */
+    blkif_vdev_t   device;       /*  8: Device number (opaque 16 bit value). */
+    u16            info;         /* 10: Device type and flags (VDISK_*).     */
+} PACKED vdisk_t; /* 12 bytes */
+
+#endif /* __SHARED_BLKIF_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h b/linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h
new file mode 100644 (file)
index 0000000..77b405e
--- /dev/null
@@ -0,0 +1,120 @@
+/******************************************************************************
+ * ctrl_if.h
+ * 
+ * Management functions for special interface to the domain controller.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __ASM_XEN__CTRL_IF_H__
+#define __ASM_XEN__CTRL_IF_H__
+
+#include <asm/hypervisor.h>
+
+typedef control_msg_t ctrl_msg_t;
+
+/*
+ * Callback function type. Called for asynchronous processing of received
+ * request messages, and responses to previously-transmitted request messages.
+ * The parameters are (@msg, @id).
+ *  @msg: Original request/response message (not a copy). The message can be
+ *        modified in-place by the handler (e.g., a response callback can
+ *        turn a request message into a response message in place). The message
+ *        is no longer accessible after the callback handler returns -- if the
+ *        message is required to persist for longer, then it must be copied.
+ *  @id:  (Response callbacks only) The 'id' that was specified when the
+ *        original request message was queued for transmission.
+ */
+typedef void (*ctrl_msg_handler_t)(ctrl_msg_t *, unsigned long);
+
+/*
+ * Send @msg to the domain controller. Execute @hnd when a response is
+ * received, passing the response message and the specified @id. This
+ * operation will not block: it will return -EAGAIN if there is no space.
+ * Notes:
+ *  1. The @msg is copied if it is transmitted and so can be freed after this
+ *     function returns.
+ *  2. If @hnd is NULL then no callback is executed.
+ */
+int ctrl_if_send_message_noblock(
+    ctrl_msg_t *msg, 
+    ctrl_msg_handler_t hnd,
+    unsigned long id);
+
+/*
+ * Send @msg to the domain controller. Execute @hnd when a response is
+ * received, passing the response message and the specified @id. This
+ * operation will block until the message is sent, or a signal is received
+ * for the calling process (unless @wait_state is TASK_UNINTERRUPTIBLE).
+ * Notes:
+ *  1. The @msg is copied if it is transmitted and so can be freed after this
+ *     function returns.
+ *  2. If @hnd is NULL then no callback is executed.
+ */
+int ctrl_if_send_message_block(
+    ctrl_msg_t *msg, 
+    ctrl_msg_handler_t hnd, 
+    unsigned long id, 
+    long wait_state);
+
+/*
+ * Request a callback when there is /possibly/ space to immediately send a
+ * message to the domain controller. This function returns 0 if there is
+ * already space to transmit a message --- in this case the callback task /may/
+ * still be executed. If this function returns 1 then the callback /will/ be
+ * executed when space becomes available.
+ */
+int ctrl_if_enqueue_space_callback(struct work_struct *task);
+
+/*
+ * Send a response (@msg) to a message from the domain controller. This will 
+ * never block.
+ * Notes:
+ *  1. The @msg is copied and so can be freed after this function returns.
+ *  2. The @msg may be the original request message, modified in-place.
+ */
+void ctrl_if_send_response(ctrl_msg_t *msg);
+
+/*
+ * Register a receiver for typed messages from the domain controller. The 
+ * handler (@hnd) is called for every received message of specified @type.
+ * Returns TRUE (non-zero) if the handler was successfully registered.
+ * If CALLBACK_IN_BLOCKING_CONTEXT is specified in @flags then callbacks will
+ * occur in a context in which it is safe to yield (i.e., process context).
+ */
+#define CALLBACK_IN_BLOCKING_CONTEXT 1
+int ctrl_if_register_receiver(
+    u8 type, 
+    ctrl_msg_handler_t hnd,
+    unsigned int flags);
+
+/*
+ * Unregister a receiver for typed messages from the domain controller. The 
+ * handler (@hnd) will not be executed after this function returns.
+ */
+void ctrl_if_unregister_receiver(u8 type, ctrl_msg_handler_t hnd);
+
+/* Suspend/resume notifications. */
+void ctrl_if_suspend(void);
+void ctrl_if_resume(void);
+
+/* Start-of-day setup. */
+void ctrl_if_init(void);
+
+/*
+ * Returns TRUE if there are no outstanding message requests at the domain
+ * controller. This can be used to ensure that messages have really flushed
+ * through when it is not possible to use the response-callback interface.
+ * WARNING: If other subsystems are using the control interface then this
+ * function might never return TRUE!
+ */
+int ctrl_if_transmitter_empty(void);  /* !! DANGEROUS FUNCTION !! */
+
+/*
+ * Manually discard response messages from the domain controller. 
+ * WARNING: This is usually done automatically -- this function should only
+ * be called when normal interrupt mechanisms are disabled!
+ */
+void ctrl_if_discard_responses(void); /* !! DANGEROUS FUNCTION !! */
+
+#endif /* __ASM_XEN__CTRL_IF_H__ */
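A hedged usage sketch of the receiver interface above (the handler and setup
names are hypothetical; CMSG_CONSOLE comes from domain_controller.h):

    static void console_rx(ctrl_msg_t *msg, unsigned long id)
    {
        /* Turn the request into a response in place and hand it back.
         * @msg must not be referenced once this handler returns. */
        msg->length = 0;
        ctrl_if_send_response(msg);
    }

    static void console_setup(void)
    {
        /* No CALLBACK_IN_BLOCKING_CONTEXT flag, so console_rx may run in
         * a context where it cannot sleep. */
        (void)ctrl_if_register_receiver(CMSG_CONSOLE, console_rx, 0);
    }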
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/domain_controller.h b/linux-2.6.7-xen-sparse/include/asm-xen/domain_controller.h
new file mode 100644 (file)
index 0000000..76dd164
--- /dev/null
@@ -0,0 +1,532 @@
+/******************************************************************************
+ * domain_controller.h
+ * 
+ * Interface to server controller (e.g., 'xend'). This header file defines the 
+ * interface that is shared with guest OSes.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __DOMAIN_CONTROLLER_H__
+#define __DOMAIN_CONTROLLER_H__
+
+
+#ifndef BASIC_START_INFO
+#error "Xen header file hypervisor-if.h must already be included here."
+#endif
+
+
+/*
+ * EXTENDED BOOTSTRAP STRUCTURE FOR NEW DOMAINS.
+ */
+
+typedef struct {
+    BASIC_START_INFO;
+    u16 domain_controller_evtchn; /* 320 */
+} PACKED extended_start_info_t; /* 322 bytes */
+#define SIF_BLK_BE_DOMAIN (1<<4)  /* Is this a block backend domain? */
+#define SIF_NET_BE_DOMAIN (1<<5)  /* Is this a net backend domain? */
+
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These are opaque to Xen but may be
+ * interpreted by control software to determine the appropriate action. They
+ * are really only advisory: the controller may act on them as it sees fit.
+ */
+#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
+#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
+#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
+
+
+/*
+ * CONTROLLER MESSAGING INTERFACE.
+ */
+
+typedef struct {
+    u8 type;     /*  0: echoed in response */
+    u8 subtype;  /*  1: echoed in response */
+    u8 id;       /*  2: echoed in response */
+    u8 length;   /*  3: number of bytes in 'msg' */
+    u8 msg[60];  /*  4: type-specific message data */
+} PACKED control_msg_t; /* 64 bytes */
+
+#define CONTROL_RING_SIZE 8
+typedef u32 CONTROL_RING_IDX;
+#define MASK_CONTROL_IDX(_i) ((_i)&(CONTROL_RING_SIZE-1))
+
+typedef struct {
+    control_msg_t tx_ring[CONTROL_RING_SIZE];   /*    0: guest -> controller */
+    control_msg_t rx_ring[CONTROL_RING_SIZE];   /*  512: controller -> guest */
+    CONTROL_RING_IDX tx_req_prod, tx_resp_prod; /* 1024, 1028 */
+    CONTROL_RING_IDX rx_req_prod, rx_resp_prod; /* 1032, 1036 */
+} PACKED control_if_t; /* 1040 bytes */
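+
+/*
+ * Guest-side transmit sketch (illustrative, not code from this tree; wmb()
+ * is the kernel's write barrier): tx_req_prod is free-running and
+ * MASK_CONTROL_IDX selects the slot. Real code must first check
+ * tx_req_prod - tx_resp_prod against CONTROL_RING_SIZE.
+ */
+static inline void ctrl_tx(control_if_t *ctrl, const control_msg_t *msg)
+{
+    ctrl->tx_ring[MASK_CONTROL_IDX(ctrl->tx_req_prod)] = *msg;
+    wmb();                /* message contents before index update */
+    ctrl->tx_req_prod++;
+}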
+
+/*
+ * Top-level command types.
+ */
+#define CMSG_CONSOLE        0  /* Console                 */
+#define CMSG_BLKIF_BE       1  /* Block-device backend    */
+#define CMSG_BLKIF_FE       2  /* Block-device frontend   */
+#define CMSG_NETIF_BE       3  /* Network-device backend  */
+#define CMSG_NETIF_FE       4  /* Network-device frontend */
+#define CMSG_SHUTDOWN       6  /* Shutdown messages       */
+
+
+/******************************************************************************
+ * CONSOLE DEFINITIONS
+ */
+
+/*
+ * Subtypes for console messages.
+ */
+#define CMSG_CONSOLE_DATA       0
+
+
+/******************************************************************************
+ * BLOCK-INTERFACE FRONTEND DEFINITIONS
+ */
+
+/* Messages from domain controller to guest. */
+#define CMSG_BLKIF_FE_INTERFACE_STATUS_CHANGED   0
+
+/* Messages from guest to domain controller. */
+#define CMSG_BLKIF_FE_DRIVER_STATUS_CHANGED     32
+#define CMSG_BLKIF_FE_INTERFACE_CONNECT         33
+#define CMSG_BLKIF_FE_INTERFACE_DISCONNECT      34
+
+/* These are used by both front-end and back-end drivers. */
+#define blkif_vdev_t   u16
+#define blkif_pdev_t   u16
+#define blkif_sector_t u64
+
+/*
+ * CMSG_BLKIF_FE_INTERFACE_STATUS_CHANGED:
+ *  Notify a guest about a status change on one of its block interfaces.
+ *  If the interface is DESTROYED or DOWN then the interface is disconnected:
+ *   1. The shared-memory frame is available for reuse.
+ *   2. Any unacknowledged messages pending on the interface were dropped.
+ */
+#define BLKIF_INTERFACE_STATUS_DESTROYED    0 /* Interface doesn't exist.    */
+#define BLKIF_INTERFACE_STATUS_DISCONNECTED 1 /* Exists but is disconnected. */
+#define BLKIF_INTERFACE_STATUS_CONNECTED    2 /* Exists and is connected.    */
+typedef struct {
+    u32 handle; /*  0 */
+    u32 status; /*  4 */
+    u16 evtchn; /*  8: (only if status == BLKIF_INTERFACE_STATUS_CONNECTED). */
+} PACKED blkif_fe_interface_status_changed_t; /* 10 bytes */
+
+/*
+ * CMSG_BLKIF_FE_DRIVER_STATUS_CHANGED:
+ *  Notify the domain controller that the front-end driver is DOWN or UP.
+ *  When the driver goes DOWN then the controller will send no more
+ *  status-change notifications. When the driver comes UP then the controller
+ *  will send a notification for each interface that currently exists.
+ *  If the driver goes DOWN while interfaces are still UP, the domain
+ *  will automatically take the interfaces DOWN.
+ */
+#define BLKIF_DRIVER_STATUS_DOWN   0
+#define BLKIF_DRIVER_STATUS_UP     1
+typedef struct {
+    /* IN */
+    u32 status;        /*  0: BLKIF_DRIVER_STATUS_??? */
+    /* OUT */
+    /*
+     * Tells driver how many interfaces it should expect to immediately
+     * receive notifications about.
+     */
+    u32 nr_interfaces; /*  4 */
+} PACKED blkif_fe_driver_status_changed_t; /* 8 bytes */
+
+/*
+ * CMSG_BLKIF_FE_INTERFACE_CONNECT:
+ *  If successful, the domain controller will acknowledge with a
+ *  STATUS_CONNECTED message.
+ */
+typedef struct {
+    u32      handle;      /*  0 */
+    u32      __pad;
+    memory_t shmem_frame; /*  8 */
+    MEMORY_PADDING;
+} PACKED blkif_fe_interface_connect_t; /* 16 bytes */
+
+/*
+ * CMSG_BLKIF_FE_INTERFACE_DISCONNECT:
+ *  If successful, the domain controller will acknowledge with a
+ *  STATUS_DISCONNECTED message.
+ */
+typedef struct {
+    u32 handle; /*  0 */
+} PACKED blkif_fe_interface_disconnect_t; /* 4 bytes */
+
+
+/******************************************************************************
+ * BLOCK-INTERFACE BACKEND DEFINITIONS
+ */
+
+/* Messages from domain controller. */
+#define CMSG_BLKIF_BE_CREATE      0  /* Create a new block-device interface. */
+#define CMSG_BLKIF_BE_DESTROY     1  /* Destroy a block-device interface.    */
+#define CMSG_BLKIF_BE_CONNECT     2  /* Connect i/f to remote driver.        */
+#define CMSG_BLKIF_BE_DISCONNECT  3  /* Disconnect i/f from remote driver.   */
+#define CMSG_BLKIF_BE_VBD_CREATE  4  /* Create a new VBD for an interface.   */
+#define CMSG_BLKIF_BE_VBD_DESTROY 5  /* Delete a VBD from an interface.      */
+#define CMSG_BLKIF_BE_VBD_GROW    6  /* Append an extent to a given VBD.     */
+#define CMSG_BLKIF_BE_VBD_SHRINK  7  /* Remove last extent from a given VBD. */
+
+/* Messages to domain controller. */
+#define CMSG_BLKIF_BE_DRIVER_STATUS_CHANGED 32
+
+/*
+ * Message request/response definitions for block-device messages.
+ */
+
+typedef struct {
+    blkif_sector_t sector_start;   /*  0 */
+    blkif_sector_t sector_length;  /*  8 */
+    blkif_pdev_t   device;         /* 16 */
+    u16            __pad;          /* 18 */
+} PACKED blkif_extent_t; /* 20 bytes */
+
+/* Non-specific 'okay' return. */
+#define BLKIF_BE_STATUS_OKAY                0
+/* Non-specific 'error' return. */
+#define BLKIF_BE_STATUS_ERROR               1
+/* The following are specific error returns. */
+#define BLKIF_BE_STATUS_INTERFACE_EXISTS    2
+#define BLKIF_BE_STATUS_INTERFACE_NOT_FOUND 3
+#define BLKIF_BE_STATUS_INTERFACE_CONNECTED 4
+#define BLKIF_BE_STATUS_VBD_EXISTS          5
+#define BLKIF_BE_STATUS_VBD_NOT_FOUND       6
+#define BLKIF_BE_STATUS_OUT_OF_MEMORY       7
+#define BLKIF_BE_STATUS_EXTENT_NOT_FOUND    8
+#define BLKIF_BE_STATUS_MAPPING_ERROR       9
+
+/* This macro can be used to create an array of descriptive error strings. */
+#define BLKIF_BE_STATUS_ERRORS {    \
+    "Okay",                         \
+    "Non-specific error",           \
+    "Interface already exists",     \
+    "Interface not found",          \
+    "Interface is still connected", \
+    "VBD already exists",           \
+    "VBD not found",                \
+    "Out of memory",                \
+    "Extent not found for VBD",     \
+    "Could not map domain memory" }
+
+/*
+ * CMSG_BLKIF_BE_CREATE:
+ *  When the driver sends a successful response then the interface is fully
+ *  created. The controller will send a DOWN notification to the front-end
+ *  driver.
+ */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Domain attached to new interface.   */
+    u32        blkif_handle;  /*  4: Domain-specific interface handle.   */
+    /* OUT */
+    u32        status;        /*  8 */
+} PACKED blkif_be_create_t; /* 12 bytes */
+
+/*
+ * CMSG_BLKIF_BE_DESTROY:
+ *  When the driver sends a successful response then the interface is fully
+ *  torn down. The controller will send a DESTROYED notification to the
+ *  front-end driver.
+ */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Identify interface to be destroyed. */
+    u32        blkif_handle;  /*  4: ...ditto...                         */
+    /* OUT */
+    u32        status;        /*  8 */
+} PACKED blkif_be_destroy_t; /* 12 bytes */
+
+/*
+ * CMSG_BLKIF_BE_CONNECT:
+ *  When the driver sends a successful response then the interface is fully
+ *  connected. The controller will send a CONNECTED notification to the
+ *  front-end driver.
+ */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Domain attached to new interface.   */
+    u32        blkif_handle;  /*  4: Domain-specific interface handle.   */
+    memory_t   shmem_frame;   /*  8: Page containing shared comms window. */
+    MEMORY_PADDING;
+    u32        evtchn;        /* 16: Event channel for notifications.    */
+    /* OUT */
+    u32        status;        /* 20 */
+} PACKED blkif_be_connect_t;  /* 24 bytes */
+
+/*
+ * CMSG_BLKIF_BE_DISCONNECT:
+ *  When the driver sends a successful response then the interface is fully
+ *  disconnected. The controller will send a DOWN notification to the front-end
+ *  driver.
+ */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Domain attached to new interface.   */
+    u32        blkif_handle;  /*  4: Domain-specific interface handle.   */
+    /* OUT */
+    u32        status;        /*  8 */
+} PACKED blkif_be_disconnect_t; /* 12 bytes */
+
+/* CMSG_BLKIF_BE_VBD_CREATE */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Identify blkdev interface.          */
+    u32        blkif_handle;  /*  4: ...ditto...                         */
+    blkif_vdev_t vdevice;     /*  8: Interface-specific id for this VBD. */
+    u16        readonly;      /* 10: Non-zero -> VBD isn't writeable.    */
+    /* OUT */
+    u32        status;        /* 12 */
+} PACKED blkif_be_vbd_create_t; /* 16 bytes */
+
+/* CMSG_BLKIF_BE_VBD_DESTROY */
+typedef struct {
+    /* IN */
+    domid_t    domid;         /*  0: Identify blkdev interface.          */
+    u32        blkif_handle;  /*  4: ...ditto...                         */
+    blkif_vdev_t vdevice;     /*  8: Interface-specific id of the VBD.   */
+    u16        __pad;         /* 10 */
+    /* OUT */
+    u32        status;        /* 12 */
+} PACKED blkif_be_vbd_destroy_t; /* 16 bytes */
+
+/* CMSG_BLKIF_BE_VBD_GROW */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Identify blkdev interface.          */
+    u32        blkif_handle;  /*  4: ...ditto...                         */
+    blkif_extent_t extent;    /*  8: Physical extent to append to VBD.   */
+    blkif_vdev_t vdevice;     /* 28: Interface-specific id of the VBD.   */
+    u16        __pad;         /* 30 */
+    /* OUT */
+    u32        status;        /* 32 */
+} PACKED blkif_be_vbd_grow_t; /* 36 bytes */
+
+/* CMSG_BLKIF_BE_VBD_SHRINK */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Identify blkdev interface.          */
+    u32        blkif_handle;  /*  4: ...ditto...                         */
+    blkif_vdev_t vdevice;     /*  8: Interface-specific id of the VBD.   */
+    u16        __pad;         /* 10 */
+    /* OUT */
+    u32        status;        /* 12 */
+} PACKED blkif_be_vbd_shrink_t; /* 16 bytes */
+
+/*
+ * CMSG_BLKIF_BE_DRIVER_STATUS_CHANGED:
+ *  Notify the domain controller that the back-end driver is DOWN or UP.
+ *  If the driver goes DOWN while interfaces are still UP, the controller
+ *  will automatically send DOWN notifications.
+ */
+typedef struct {
+    u32        status;        /*  0: BLKIF_DRIVER_STATUS_??? */
+} PACKED blkif_be_driver_status_changed_t; /* 4 bytes */
+
+
+/******************************************************************************
+ * NETWORK-INTERFACE FRONTEND DEFINITIONS
+ */
+
+/* Messages from domain controller to guest. */
+#define CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED   0
+
+/* Messages from guest to domain controller. */
+#define CMSG_NETIF_FE_DRIVER_STATUS_CHANGED     32
+#define CMSG_NETIF_FE_INTERFACE_CONNECT         33
+#define CMSG_NETIF_FE_INTERFACE_DISCONNECT      34
+
+/*
+ * CMSG_NETIF_FE_INTERFACE_STATUS_CHANGED:
+ *  Notify a guest about a status change on one of its network interfaces.
+ *  If the interface is DESTROYED or DOWN then the interface is disconnected:
+ *   1. The shared-memory frame is available for reuse.
+ *   2. Any unacknowledged messages pending on the interface are dropped.
+ */
+#define NETIF_INTERFACE_STATUS_DESTROYED    0 /* Interface doesn't exist.    */
+#define NETIF_INTERFACE_STATUS_DISCONNECTED 1 /* Exists but is disconnected. */
+#define NETIF_INTERFACE_STATUS_CONNECTED    2 /* Exists and is connected.    */
+typedef struct {
+    u32        handle; /*  0 */
+    u32        status; /*  4 */
+    u16        evtchn; /*  8: status == NETIF_INTERFACE_STATUS_CONNECTED */
+    u8         mac[6]; /* 10: status == NETIF_INTERFACE_STATUS_CONNECTED */
+} PACKED netif_fe_interface_status_changed_t; /* 16 bytes */
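+
+/*
+ * Illustrative frontend handling of this message (a sketch; the helper
+ * names are hypothetical):
+ *
+ *  switch ( msg->status )
+ *  {
+ *  case NETIF_INTERFACE_STATUS_CONNECTED:
+ *      bind_evtchn_and_set_mac(msg->handle, msg->evtchn, msg->mac);
+ *      break;
+ *  case NETIF_INTERFACE_STATUS_DISCONNECTED:
+ *  case NETIF_INTERFACE_STATUS_DESTROYED:
+ *      tear_down_interface(msg->handle);
+ *      break;
+ *  }
+ */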
+
+/*
+ * CMSG_NETIF_FE_DRIVER_STATUS_CHANGED:
+ *  Notify the domain controller that the front-end driver is DOWN or UP.
+ *  When the driver goes DOWN then the controller will send no more
+ *  status-change notifications. When the driver comes UP then the controller
+ *  will send a notification for each interface that currently exists.
+ *  If the driver goes DOWN while interfaces are still UP, the controller
+ *  will automatically take the interfaces DOWN.
+ */
+#define NETIF_DRIVER_STATUS_DOWN   0
+#define NETIF_DRIVER_STATUS_UP     1
+typedef struct {
+    /* IN */
+    u32        status;        /*  0: NETIF_DRIVER_STATUS_??? */
+    /* OUT */
+    /*
+     * Tells driver how many interfaces it should expect to immediately
+     * receive notifications about.
+     */
+    u32        nr_interfaces; /*  4 */
+} PACKED netif_fe_driver_status_changed_t; /* 8 bytes */
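+
+/*
+ * Sketch of the frontend side of this exchange (illustrative only):
+ *
+ *  netif_fe_driver_status_changed_t st;
+ *  st.status = NETIF_DRIVER_STATUS_UP;
+ *  ...send CMSG_NETIF_FE_DRIVER_STATUS_CHANGED and await the response...
+ *  The returned st.nr_interfaces says how many STATUS_CHANGED messages
+ *  to expect before the probe is complete.
+ */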
+
+/*
+ * CMSG_NETIF_FE_INTERFACE_CONNECT:
+ *  If successful, the domain controller will acknowledge with a
+ *  STATUS_CONNECTED message.
+ */
+typedef struct {
+    u32        handle;         /*  0 */
+    u32        __pad;          /*  4 */
+    memory_t   tx_shmem_frame; /*  8 */
+    MEMORY_PADDING;
+    memory_t   rx_shmem_frame; /* 16 */
+    MEMORY_PADDING;
+} PACKED netif_fe_interface_connect_t; /* 24 bytes */
+
+/*
+ * CMSG_NETIF_FE_INTERFACE_DISCONNECT:
+ *  If successful, the domain controller will acknowledge with a
+ *  STATUS_DISCONNECTED message.
+ */
+typedef struct {
+    u32        handle;        /*  0 */
+} PACKED netif_fe_interface_disconnect_t; /* 4 bytes */
+
+
+/******************************************************************************
+ * NETWORK-INTERFACE BACKEND DEFINITIONS
+ */
+
+/* Messages from domain controller. */
+#define CMSG_NETIF_BE_CREATE      0  /* Create a new net-device interface. */
+#define CMSG_NETIF_BE_DESTROY     1  /* Destroy a net-device interface.    */
+#define CMSG_NETIF_BE_CONNECT     2  /* Connect i/f to remote driver.      */
+#define CMSG_NETIF_BE_DISCONNECT  3  /* Disconnect i/f from remote driver. */
+
+/* Messages to domain controller. */
+#define CMSG_NETIF_BE_DRIVER_STATUS_CHANGED 32
+
+/*
+ * Message request/response definitions for net-device messages.
+ */
+
+/* Non-specific 'okay' return. */
+#define NETIF_BE_STATUS_OKAY                0
+/* Non-specific 'error' return. */
+#define NETIF_BE_STATUS_ERROR               1
+/* The following are specific error returns. */
+#define NETIF_BE_STATUS_INTERFACE_EXISTS    2
+#define NETIF_BE_STATUS_INTERFACE_NOT_FOUND 3
+#define NETIF_BE_STATUS_INTERFACE_CONNECTED 4
+#define NETIF_BE_STATUS_OUT_OF_MEMORY       5
+#define NETIF_BE_STATUS_MAPPING_ERROR       6
+
+/* This macro can be used to create an array of descriptive error strings. */
+#define NETIF_BE_STATUS_ERRORS {    \
+    "Okay",                         \
+    "Non-specific error",           \
+    "Interface already exists",     \
+    "Interface not found",          \
+    "Interface is still connected", \
+    "Out of memory",                \
+    "Could not map domain memory" }
+
+/*
+ * CMSG_NETIF_BE_CREATE:
+ *  When the driver sends a successful response then the interface is fully
+ *  created. The controller will send a DOWN notification to the front-end
+ *  driver.
+ */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Domain attached to new interface.   */
+    u32        netif_handle;  /*  4: Domain-specific interface handle.   */
+    u8         mac[6];        /*  8 */
+    u16        __pad;         /* 14 */
+    /* OUT */
+    u32        status;        /* 16 */
+} PACKED netif_be_create_t; /* 20 bytes */
+
+/*
+ * CMSG_NETIF_BE_DESTROY:
+ *  When the driver sends a successful response then the interface is fully
+ *  torn down. The controller will send a DESTROYED notification to the
+ *  front-end driver.
+ */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Identify interface to be destroyed. */
+    u32        netif_handle;  /*  4: ...ditto...                         */
+    /* OUT */
+    u32        status;        /*  8 */
+} PACKED netif_be_destroy_t; /* 12 bytes */
+
+/*
+ * CMSG_NETIF_BE_CONNECT:
+ *  When the driver sends a successful response then the interface is fully
+ *  connected. The controller will send a CONNECTED notification to the
+ *  front-end driver.
+ */
+typedef struct { 
+    /* IN */
+    domid_t    domid;          /*  0: Domain attached to new interface.   */
+    u32        netif_handle;   /*  4: Domain-specific interface handle.   */
+    memory_t   tx_shmem_frame; /*  8: Page containing tx comms window.    */
+    MEMORY_PADDING;
+    memory_t   rx_shmem_frame; /* 16: Page containing rx comms window.    */
+    MEMORY_PADDING;
+    u16        evtchn;         /* 24: Event channel for notifications.    */
+    u16        __pad;          /* 26 */
+    /* OUT */
+    u32        status;         /* 28 */
+} PACKED netif_be_connect_t; /* 32 bytes */
+
+/*
+ * CMSG_NETIF_BE_DISCONNECT:
+ *  When the driver sends a successful response then the interface is fully
+ *  disconnected. The controller will send a DOWN notification to the front-end
+ *  driver.
+ */
+typedef struct { 
+    /* IN */
+    domid_t    domid;         /*  0: Domain attached to new interface.   */
+    u32        netif_handle;  /*  4: Domain-specific interface handle.   */
+    /* OUT */
+    u32        status;        /*  8 */
+} PACKED netif_be_disconnect_t; /* 12 bytes */
+
+/*
+ * CMSG_NETIF_BE_DRIVER_STATUS_CHANGED:
+ *  Notify the domain controller that the back-end driver is DOWN or UP.
+ *  If the driver goes DOWN while interfaces are still UP, the controller
+ *  will automatically send DOWN notifications.
+ */
+typedef struct {
+    u32        status;        /*  0: NETIF_DRIVER_STATUS_??? */
+} PACKED netif_be_driver_status_changed_t; /* 4 bytes */
+
+
+/******************************************************************************
+ * SHUTDOWN DEFINITIONS
+ */
+
+/*
+ * Subtypes for shutdown messages.
+ */
+#define CMSG_SHUTDOWN_POWEROFF  0   /* Clean shutdown (SHUTDOWN_poweroff).   */
+#define CMSG_SHUTDOWN_REBOOT    1   /* Clean shutdown (SHUTDOWN_reboot).     */
+#define CMSG_SHUTDOWN_SUSPEND   2   /* Create suspend info, then             */
+                                    /* SHUTDOWN_suspend.                     */
+
+#endif /* __DOMAIN_CONTROLLER_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/evtchn.h b/linux-2.6.7-xen-sparse/include/asm-xen/evtchn.h
new file mode 100644 (file)
index 0000000..6509317
--- /dev/null
@@ -0,0 +1,83 @@
+/******************************************************************************
+ * evtchn.h
+ * 
+ * Communication via Xen event channels.
+ * Also definitions for the device that demuxes notifications to userspace.
+ * 
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __ASM_EVTCHN_H__
+#define __ASM_EVTCHN_H__
+
+#include <linux/config.h>
+#include <asm/hypervisor.h>
+#include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
+#include <asm/hypervisor-ifs/event_channel.h>
+
+/*
+ * LOW-LEVEL DEFINITIONS
+ */
+
+/* Entry point for notifications into Linux subsystems. */
+void evtchn_do_upcall(struct pt_regs *regs);
+
+/* Entry point for notifications into the userland character device. */
+void evtchn_device_upcall(int port);
+
+static inline void mask_evtchn(int port)
+{
+    shared_info_t *s = HYPERVISOR_shared_info;
+    synch_set_bit(port, &s->evtchn_mask[0]);
+}
+
+static inline void unmask_evtchn(int port)
+{
+    shared_info_t *s = HYPERVISOR_shared_info;
+
+    synch_clear_bit(port, &s->evtchn_mask[0]);
+
+    /*
+     * The following is basically the equivalent of 'hw_resend_irq'. Just like
+     * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
+     */
+    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
+         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+    {
+        s->vcpu_data[0].evtchn_upcall_pending = 1;
+        if ( !s->vcpu_data[0].evtchn_upcall_mask )
+            evtchn_do_upcall(NULL);
+    }
+}
+
+static inline void clear_evtchn(int port)
+{
+    shared_info_t *s = HYPERVISOR_shared_info;
+    synch_clear_bit(port, &s->evtchn_pending[0]);
+}
+
+static inline void notify_via_evtchn(int port)
+{
+    evtchn_op_t op;
+    op.cmd = EVTCHNOP_send;
+    op.u.send.local_port = port;
+    (void)HYPERVISOR_event_channel_op(&op);
+}
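+
+/*
+ * A typical notification path uses the helpers above roughly as follows
+ * (a sketch):
+ *
+ *  mask_evtchn(port);
+ *  clear_evtchn(port);
+ *  ...service the event...
+ *  unmask_evtchn(port);        (replays any edge lost while masked)
+ *
+ * while notify_via_evtchn(port) signals the remote end of the channel.
+ */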
+
+/*
+ * CHARACTER-DEVICE DEFINITIONS
+ */
+
+/* /dev/xen/evtchn resides at device number major=10, minor=200 */
+#define EVTCHN_MINOR 200
+
+/* /dev/xen/evtchn ioctls: */
+/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
+#define EVTCHN_RESET  _IO('E', 1)
+/* EVTCHN_BIND: Bind to the specified event-channel port. */
+#define EVTCHN_BIND   _IO('E', 2)
+/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
+#define EVTCHN_UNBIND _IO('E', 3)
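+
+/*
+ * Illustrative userspace usage (a sketch; error handling omitted):
+ *
+ *  int fd = open("/dev/xen/evtchn", O_RDWR);
+ *  ioctl(fd, EVTCHN_BIND, port);       (bind to an event-channel port)
+ *  read(fd, buf, sizeof(buf));         (blocks until a bound port fires)
+ *  ioctl(fd, EVTCHN_UNBIND, port);
+ */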
+
+#endif /* __ASM_EVTCHN_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/arch-x86_32.h b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/arch-x86_32.h
new file mode 100644 (file)
index 0000000..80055a5
--- /dev/null
@@ -0,0 +1,138 @@
+/******************************************************************************
+ * arch-i386/hypervisor-if.h
+ * 
+ * Guest OS interface to x86 32-bit Xen.
+ */
+
+#ifndef __HYPERVISOR_IF_I386_H__
+#define __HYPERVISOR_IF_I386_H__
+
+/*
+ * Pointers and other address fields inside interface structures are padded to
+ * 64 bits. This means that field alignments aren't different between 32- and
+ * 64-bit architectures. 
+ */
+/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
+#define __MEMORY_PADDING(_X) u32 __pad_ ## _X
+#define _MEMORY_PADDING(_X)  __MEMORY_PADDING(_X)
+#define MEMORY_PADDING       _MEMORY_PADDING(__LINE__)
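+
+/*
+ * For example, a MEMORY_PADDING appearing on line 42 of a header expands,
+ * via the two-level macro, to 'u32 __pad_42', giving every padding field a
+ * unique name.
+ */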
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way.
+ * 
+ * NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
+ * and LAST_RESERVED_GDT_ENTRY are reserved).
+ */
+#define NR_RESERVED_GDT_ENTRIES    40
+#define FIRST_RESERVED_GDT_ENTRY   256
+#define LAST_RESERVED_GDT_ENTRY    \
+  (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
+
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0x0819    /* GDT index 259 */
+#define FLAT_RING1_DS 0x0821    /* GDT index 260 */
+#define FLAT_RING3_CS 0x082b    /* GDT index 261 */
+#define FLAT_RING3_DS 0x0833    /* GDT index 262 */
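+
+/*
+ * NB. A selector value encodes (index<<3 | TI<<2 | RPL); for example,
+ * FLAT_RING1_CS == 0x0819 == (259<<3)|1, i.e. GDT index 259 with RPL 1.
+ */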
+
+#define FLAT_GUESTOS_CS FLAT_RING1_CS
+#define FLAT_GUESTOS_DS FLAT_RING1_DS
+#define FLAT_USER_CS    FLAT_RING3_CS
+#define FLAT_USER_DS    FLAT_RING3_DS
+
+/* And the trap vector is... */
+#define TRAP_INSTR "int $0x82"
+
+
+/*
+ * Virtual addresses beyond this are not modifiable by guest OSes. The 
+ * machine->physical mapping table starts at this address, read-only.
+ */
+#define HYPERVISOR_VIRT_START (0xFC000000UL)
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* NB. Both the following are 32 bits each. */
+typedef unsigned long memory_t;   /* Full-sized pointer/address/memory-size. */
+typedef unsigned long cpureg_t;   /* Full-sized register.                    */
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table()
+ */
+#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
+#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
+typedef struct {
+    u8       vector;  /* 0: exception vector                              */
+    u8       flags;   /* 1: 0-3: privilege level; 4: clear event enable?  */
+    u16      cs;      /* 2: code selector                                 */
+    memory_t address; /* 4: code address                                  */
+} PACKED trap_info_t; /* 8 bytes */
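+
+/*
+ * E.g. (sketch) an entry for a vector-3 handler callable from ring 3,
+ * where 'handler' is a hypothetical guest function:
+ *
+ *  trap_info_t ti = { 3, 0, FLAT_GUESTOS_CS, (memory_t)handler };
+ *  TI_SET_DPL(&ti, 3);
+ */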
+
+typedef struct
+{
+    unsigned long ebx;
+    unsigned long ecx;
+    unsigned long edx;
+    unsigned long esi;
+    unsigned long edi;
+    unsigned long ebp;
+    unsigned long eax;
+    unsigned long ds;
+    unsigned long es;
+    unsigned long fs;
+    unsigned long gs;
+    unsigned long _unused;
+    unsigned long eip;
+    unsigned long cs;
+    unsigned long eflags;
+    unsigned long esp;
+    unsigned long ss;
+} PACKED execution_context_t;
+
+typedef struct {
+    u32  tsc_bits;      /* 0: 32 bits read from the CPU's TSC. */
+    u32  tsc_bitshift;  /* 4: 'tsc_bits' uses N:N+31 of TSC.   */
+} PACKED tsc_timestamp_t; /* 8 bytes */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+typedef struct {
+#define ECF_I387_VALID (1<<0)
+    unsigned long flags;
+    execution_context_t cpu_ctxt;           /* User-level CPU registers     */
+    char          fpu_ctxt[256];            /* User-level FPU registers     */
+    trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
+    unsigned int  fast_trap_idx;            /* "Fast trap" vector offset    */
+    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
+    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+    unsigned long guestos_ss, guestos_esp;  /* Virtual TSS (only SS1/ESP1)  */
+    unsigned long pt_base;                  /* CR3 (pagetable base)         */
+    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
+    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
+    unsigned long event_callback_eip;
+    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
+    unsigned long failsafe_callback_eip;
+} PACKED full_execution_context_t;
+
+#define ARCH_HAS_FAST_TRAP
+
+#endif
+
+#endif
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/arch-x86_64.h b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/arch-x86_64.h
new file mode 100644 (file)
index 0000000..701084c
--- /dev/null
@@ -0,0 +1,135 @@
+/******************************************************************************
+ * arch-x86_64/hypervisor-if.h
+ * 
+ * Guest OS interface to AMD x86-64 Xen.
+ */
+
+#ifndef __HYPERVISOR_IF_X86_64_H__
+#define __HYPERVISOR_IF_X86_64_H__
+
+/* Pointers are naturally 64 bits in this architecture; no padding needed. */
+#define _MEMORY_PADDING(_X)
+#define MEMORY_PADDING 
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way.
+ * 
+ * NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
+ * and LAST_RESERVED_GDT_ENTRY are reserved).
+ */
+#define NR_RESERVED_GDT_ENTRIES    40 
+#define FIRST_RESERVED_GDT_ENTRY   256
+#define LAST_RESERVED_GDT_ENTRY    \
+  (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
+
+/*
+ * 64-bit segment selectors
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+
+#define FLAT_RING3_CS32 0x0823  /* GDT index 260 */
+#define FLAT_RING3_CS64 0x082b  /* GDT index 261 */
+#define FLAT_RING3_DS   0x0833  /* GDT index 262 */
+
+#define FLAT_GUESTOS_DS   FLAT_RING3_DS
+#define FLAT_GUESTOS_CS   FLAT_RING3_CS64
+#define FLAT_GUESTOS_CS32 FLAT_RING3_CS32
+
+#define FLAT_USER_DS      FLAT_RING3_DS
+#define FLAT_USER_CS      FLAT_RING3_CS64
+#define FLAT_USER_CS32    FLAT_RING3_CS32
+
+/* And the trap vector is... */
+#define TRAP_INSTR "syscall"
+
+/* The machine->physical mapping table starts at this address, read-only. */
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)0xffff810000000000ULL)
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* NB. Both the following are 64 bits each. */
+typedef unsigned long memory_t;   /* Full-sized pointer/address/memory-size. */
+typedef unsigned long cpureg_t;   /* Full-sized register.                    */
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table()
+ */
+#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
+#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
+typedef struct {
+    u8       vector;  /* 0: exception vector                              */
+    u8       flags;   /* 1: 0-3: privilege level; 4: clear event enable?  */
+    u16      cs;      /* 2: code selector                                 */
+    u32      __pad;   /* 4 */
+    memory_t address; /* 8: code address                                  */
+} PACKED trap_info_t; /* 16 bytes */
+
+typedef struct
+{
+    unsigned long r15;
+    unsigned long r14;
+    unsigned long r13;
+    unsigned long r12;
+    unsigned long rbp;
+    unsigned long rbx;
+    unsigned long r11;
+    unsigned long r10;
+    unsigned long r9;
+    unsigned long r8;
+    unsigned long rax;
+    unsigned long rcx;
+    unsigned long rdx;
+    unsigned long rsi;
+    unsigned long rdi;
+    unsigned long rip;
+    unsigned long cs;
+    unsigned long eflags;
+    unsigned long rsp;
+    unsigned long ss;
+} PACKED execution_context_t;
+
+/*
+ * NB. This may become a 64-bit count with no shift. If this happens then the 
+ * structure size will still be 8 bytes, so no other alignments will change.
+ */
+typedef struct {
+    u32  tsc_bits;      /* 0: 32 bits read from the CPU's TSC. */
+    u32  tsc_bitshift;  /* 4: 'tsc_bits' uses N:N+31 of TSC.   */
+} PACKED tsc_timestamp_t; /* 8 bytes */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+typedef struct {
+#define ECF_I387_VALID (1<<0)
+    unsigned long flags;
+    execution_context_t cpu_ctxt;           /* User-level CPU registers     */
+    char          fpu_ctxt[512];            /* User-level FPU registers     */
+    trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
+    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
+    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+    unsigned long guestos_ss, guestos_esp;  /* Virtual TSS (only SS1/ESP1)  */
+    unsigned long pt_base;                  /* CR3 (pagetable base)         */
+    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
+    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
+    unsigned long event_callback_eip;
+    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
+    unsigned long failsafe_callback_eip;
+} PACKED full_execution_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __HYPERVISOR_IF_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/dom0_ops.h b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/dom0_ops.h
new file mode 100644 (file)
index 0000000..9cb5656
--- /dev/null
@@ -0,0 +1,356 @@
+/******************************************************************************
+ * dom0_ops.h
+ * 
+ * Process command requests from domain-0 guest OS.
+ * 
+ * Copyright (c) 2002-2003, B Dragovic
+ * Copyright (c) 2002-2004, K Fraser
+ */
+
+
+#ifndef __DOM0_OPS_H__
+#define __DOM0_OPS_H__
+
+#include "hypervisor-if.h"
+#include "sched_ctl.h"
+
+/*
+ * Make sure you increment the interface version whenever you modify this file!
+ * This makes sure that old versions of dom0 tools will stop working in a
+ * well-defined way (rather than crashing the machine, for instance).
+ */
+#define DOM0_INTERFACE_VERSION   0xAAAA0010
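+
+/*
+ * E.g. (sketch) every request is stamped before submission:
+ *  op.cmd = DOM0_CREATEDOMAIN;
+ *  op.interface_version = DOM0_INTERFACE_VERSION;
+ */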
+
+#define MAX_DOMAIN_NAME    16
+
+/************************************************************************/
+
+#define DOM0_GETMEMLIST        2
+typedef struct {
+    /* IN variables. */
+    domid_t       domain;             /*  0 */
+    u32           __pad;
+    memory_t      max_pfns;           /*  8 */
+    MEMORY_PADDING;
+    void         *buffer;             /* 16 */
+    MEMORY_PADDING;
+    /* OUT variables. */
+    memory_t      num_pfns;           /* 24 */
+    MEMORY_PADDING;
+} PACKED dom0_getmemlist_t; /* 32 bytes */
+
+#define DOM0_SCHEDCTL          6
+/* struct sched_ctl_cmd is from sched_ctl.h */
+typedef struct sched_ctl_cmd dom0_schedctl_t;
+
+#define DOM0_ADJUSTDOM         7
+/* struct sched_adjdom_cmd is from sched_ctl.h */
+typedef struct sched_adjdom_cmd dom0_adjustdom_t;
+
+#define DOM0_CREATEDOMAIN      8
+typedef struct {
+    /* IN parameters. */
+    memory_t     memory_kb;           /*  0 */
+    MEMORY_PADDING;
+    u8           name[MAX_DOMAIN_NAME]; /*  8 */
+    u32          cpu;                 /* 24 */
+    u32          __pad;               /* 28 */
+    /* OUT parameters. */
+    domid_t      domain;              /* 32 */
+} PACKED dom0_createdomain_t; /* 36 bytes */
+
+#define DOM0_DESTROYDOMAIN     9
+typedef struct {
+    /* IN variables. */
+    domid_t      domain;              /*  0 */
+} PACKED dom0_destroydomain_t; /* 4 bytes */
+
+#define DOM0_PAUSEDOMAIN      10
+typedef struct {
+    /* IN parameters. */
+    domid_t domain;                   /*  0 */
+} PACKED dom0_pausedomain_t; /* 4 bytes */
+
+#define DOM0_UNPAUSEDOMAIN    11
+typedef struct {
+    /* IN parameters. */
+    domid_t domain;                   /*  0 */
+} PACKED dom0_unpausedomain_t; /* 4 bytes */
+
+#define DOM0_GETDOMAININFO    12
+typedef struct {
+    /* IN variables. */
+    domid_t  domain;                  /*  0 */ /* NB. IN/OUT variable. */
+    /* OUT variables. */
+#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
+#define DOMFLAGS_CRASHED   (1<<1) /* Crashed domain; frozen for postmortem.  */
+#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut itself down.      */
+#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
+#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
+#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
+#define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
+#define DOMFLAGS_CPUSHIFT       8
+#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
+#define DOMFLAGS_SHUTDOWNSHIFT 16
+    u32      flags;                   /*  4 */
+    u8       name[MAX_DOMAIN_NAME];   /*  8 */
+    full_execution_context_t *ctxt;   /* 24 */ /* NB. IN/OUT variable. */
+    MEMORY_PADDING;
+    memory_t tot_pages;               /* 32 */
+    MEMORY_PADDING;
+    memory_t max_pages;               /* 40 */
+    MEMORY_PADDING;
+    memory_t shared_info_frame;       /* 48: MFN of shared_info struct */
+    MEMORY_PADDING;
+    u64      cpu_time;                /* 56 */
+} PACKED dom0_getdomaininfo_t; /* 64 bytes */
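+
+/*
+ * The packed 'flags' word above is decoded as follows (sketch):
+ *  cpu       = (flags >> DOMFLAGS_CPUSHIFT)      & DOMFLAGS_CPUMASK;
+ *  shut_code = (flags >> DOMFLAGS_SHUTDOWNSHIFT) & DOMFLAGS_SHUTDOWNMASK;
+ *  dying     = !!(flags & DOMFLAGS_DYING);
+ */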
+
+#define DOM0_BUILDDOMAIN      13
+typedef struct {
+    /* IN variables. */
+    domid_t                 domain;   /*  0 */
+    u32                     __pad;    /*  4 */
+    /* IN/OUT parameters */
+    full_execution_context_t *ctxt;   /*  8 */
+    MEMORY_PADDING;
+} PACKED dom0_builddomain_t; /* 16 bytes */
+
+#define DOM0_IOPL             14
+typedef struct {
+    domid_t domain;                   /*  0 */
+    u32     iopl;                     /*  4 */
+} PACKED dom0_iopl_t; /* 8 bytes */
+
+#define DOM0_MSR              15
+typedef struct {
+    /* IN variables. */
+    u32 write;                        /*  0 */
+    u32 cpu_mask;                     /*  4 */
+    u32 msr;                          /*  8 */
+    u32 in1;                          /* 12 */
+    u32 in2;                          /* 16 */
+    /* OUT variables. */
+    u32 out1;                         /* 20 */
+    u32 out2;                         /* 24 */
+} PACKED dom0_msr_t; /* 28 bytes */
+
+#define DOM0_DEBUG            16
+typedef struct {
+    /* IN variables. */
+    domid_t domain;                   /*  0 */
+    u8  opcode;                       /*  4 */
+    u8  __pad0, __pad1, __pad2;
+    u32 in1;                          /*  8 */
+    u32 in2;                          /* 12 */
+    u32 in3;                          /* 16 */
+    u32 in4;                          /* 20 */
+    /* OUT variables. */
+    u32 status;                       /* 24 */
+    u32 out1;                         /* 28 */
+    u32 out2;                         /* 32 */
+} PACKED dom0_debug_t; /* 36 bytes */
+
+/*
+ * Set clock such that it would read <secs,usecs> after 00:00:00 UTC,
+ * 1 January, 1970 if the current system time was <system_time>.
+ */
+#define DOM0_SETTIME          17
+typedef struct {
+    /* IN variables. */
+    u32 secs;                         /*  0 */
+    u32 usecs;                        /*  4 */
+    u64 system_time;                  /*  8 */
+} PACKED dom0_settime_t; /* 16 bytes */
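+
+/*
+ * I.e. the wall-clock time at any later instant can be recovered as
+ *  <secs,usecs> + (current system time - system_time)
+ * with the system-time difference expressed in nanoseconds.
+ */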
+
+#define DOM0_GETPAGEFRAMEINFO 18
+#define NOTAB 0         /* normal page */
+#define L1TAB (1<<28)
+#define L2TAB (2<<28)
+#define L3TAB (3<<28)
+#define L4TAB (4<<28)
+#define XTAB  (0xf<<28) /* invalid page */
+#define LTAB_MASK XTAB
+typedef struct {
+    /* IN variables. */
+    memory_t pfn;          /*  0: Machine page frame number to query.       */
+    MEMORY_PADDING;
+    domid_t domain;        /*  8: To which domain does the frame belong?    */
+    /* OUT variables. */
+    /* Is the page PINNED to a type? */
+    u32 type;              /* 12: see above type defs */
+} PACKED dom0_getpageframeinfo_t; /* 16 bytes */
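+
+/*
+ * E.g. (sketch) the table level of a queried frame is recovered with:
+ *  switch ( type & LTAB_MASK )
+ *  { case NOTAB: ... case L1TAB: ... case XTAB: ... }
+ */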
+
+/*
+ * Read console content from Xen buffer ring.
+ */
+#define DOM0_READCONSOLE      19
+typedef struct {
+    memory_t str;                     /*  0 */
+    MEMORY_PADDING;
+    u32      count;                   /*  8 */
+    u32      cmd;                     /* 12 */
+} PACKED dom0_readconsole_t; /* 16 bytes */
+
+/* 
+ * Pin a domain to a particular CPU (use -1 to unpin).
+ */
+#define DOM0_PINCPUDOMAIN     20
+typedef struct {
+    /* IN variables. */
+    domid_t      domain;              /*  0 */
+    s32          cpu;                 /*  4: -1 implies unpin */
+} PACKED dom0_pincpudomain_t; /* 8 bytes */
+
+/* Get trace buffers physical base pointer */
+#define DOM0_GETTBUFS         21
+typedef struct {
+    /* OUT variables */
+    memory_t phys_addr;   /*  0: location of the trace buffers       */
+    MEMORY_PADDING;
+    u32      size;        /*  8: size of each trace buffer, in bytes */
+} PACKED dom0_gettbufs_t; /* 12 bytes */
+
+/*
+ * Get physical information about the host machine
+ */
+#define DOM0_PHYSINFO         22
+typedef struct {
+    u32      ht_per_core;             /*  0 */
+    u32      cores;                   /*  4 */
+    u32      cpu_khz;                 /*  8 */
+    u32      __pad;                   /* 12 */
+    memory_t total_pages;             /* 16 */
+    MEMORY_PADDING;
+    memory_t free_pages;              /* 24 */
+    MEMORY_PADDING;
+} PACKED dom0_physinfo_t; /* 32 bytes */
+
+/* 
+ * Allow a domain access to a physical PCI device
+ */
+#define DOM0_PCIDEV_ACCESS    23
+typedef struct {
+    /* IN variables. */
+    domid_t      domain;              /*  0 */
+    u32          bus;                 /*  4 */
+    u32          dev;                 /*  8 */
+    u32          func;                /* 12 */
+    u32          enable;              /* 16 */
+} PACKED dom0_pcidev_access_t; /* 20 bytes */
+
+/*
+ * Get the ID of the current scheduler.
+ */
+#define DOM0_SCHED_ID        24
+typedef struct {
+    /* OUT variable */
+    u32 sched_id;                     /*  0 */
+} PACKED dom0_sched_id_t; /* 4 bytes */
+
+/* 
+ * Control shadow-pagetable operations.
+ */
+#define DOM0_SHADOW_CONTROL  25
+
+#define DOM0_SHADOW_CONTROL_OP_OFF         0
+#define DOM0_SHADOW_CONTROL_OP_ENABLE_TEST 1
+#define DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY 2
+#define DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE 3
+#define DOM0_SHADOW_CONTROL_OP_FLUSH       10     /* table ops */
+#define DOM0_SHADOW_CONTROL_OP_CLEAN       11
+#define DOM0_SHADOW_CONTROL_OP_PEEK        12
+#define DOM0_SHADOW_CONTROL_OP_CLEAN2      13
+
+typedef struct dom0_shadow_control
+{
+    u32 fault_count;
+    u32 dirty_count;
+    u32 dirty_net_count;     
+    u32 dirty_block_count;     
+} dom0_shadow_control_stats_t;
+
+typedef struct {
+    /* IN variables. */
+    domid_t        domain;            /*  0 */
+    u32            op;                /*  4 */
+    unsigned long *dirty_bitmap;      /*  8: pointer to locked buffer */
+    MEMORY_PADDING;
+    /* IN/OUT variables. */
+    memory_t       pages;  /* 16: size of buffer, updated with actual size */
+    MEMORY_PADDING;
+    /* OUT variables. */
+    dom0_shadow_control_stats_t stats;
+} PACKED dom0_shadow_control_t;
+
+
+#define DOM0_SETDOMAINNAME     26
+typedef struct {
+    /* IN variables. */
+    domid_t  domain;                  /*  0 */
+    u8       name[MAX_DOMAIN_NAME];   /*  4 */
+} PACKED dom0_setdomainname_t; /* 20 bytes */
+
+#define DOM0_SETDOMAININITIALMEM   27
+typedef struct {
+    /* IN variables. */
+    domid_t     domain;               /*  0 */
+    u32         __pad;
+    memory_t    initial_memkb;        /*  8 */
+    MEMORY_PADDING;
+} PACKED dom0_setdomaininitialmem_t; /* 16 bytes */
+
+#define DOM0_SETDOMAINMAXMEM   28
+typedef struct {
+    /* IN variables. */
+    domid_t     domain;               /*  0 */
+    u32         __pad;
+    memory_t    max_memkb;            /*  8 */
+    MEMORY_PADDING;
+} PACKED dom0_setdomainmaxmem_t; /* 16 bytes */
+
+#define DOM0_GETPAGEFRAMEINFO2 29   /* batched interface */
+typedef struct {
+    /* IN variables. */
+    domid_t  domain;                  /*  0 */
+    u32         __pad;
+    memory_t num;                     /*  8 */
+    MEMORY_PADDING;
+    /* IN/OUT variables. */
+    unsigned long *array;             /* 16 */
+    MEMORY_PADDING;
+} PACKED dom0_getpageframeinfo2_t; /* 24 bytes */
+
+typedef struct {
+    u32 cmd;                          /* 0 */
+    u32 interface_version;            /* 4 */ /* DOM0_INTERFACE_VERSION */
+    union {                           /* 8 */
+        u32                      dummy[18]; /* 72 bytes */
+        dom0_createdomain_t      createdomain;
+        dom0_pausedomain_t       pausedomain;
+        dom0_unpausedomain_t     unpausedomain;
+        dom0_destroydomain_t     destroydomain;
+        dom0_getmemlist_t        getmemlist;
+        dom0_schedctl_t          schedctl;
+        dom0_adjustdom_t         adjustdom;
+        dom0_builddomain_t       builddomain;
+        dom0_getdomaininfo_t     getdomaininfo;
+        dom0_getpageframeinfo_t  getpageframeinfo;
+        dom0_iopl_t              iopl;
+        dom0_msr_t               msr;
+        dom0_debug_t             debug;
+        dom0_settime_t           settime;
+        dom0_readconsole_t       readconsole;
+        dom0_pincpudomain_t      pincpudomain;
+        dom0_gettbufs_t          gettbufs;
+        dom0_physinfo_t          physinfo;
+        dom0_pcidev_access_t     pcidev_access;
+        dom0_sched_id_t          sched_id;
+        dom0_shadow_control_t    shadow_control;
+        dom0_setdomainname_t     setdomainname;
+        dom0_setdomaininitialmem_t setdomaininitialmem;
+        dom0_setdomainmaxmem_t   setdomainmaxmem;
+        dom0_getpageframeinfo2_t getpageframeinfo2;
+    } PACKED u;
+} PACKED dom0_op_t; /* 80 bytes */
+
+#endif /* __DOM0_OPS_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/event_channel.h b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/event_channel.h
new file mode 100644 (file)
index 0000000..5868cab
--- /dev/null
@@ -0,0 +1,127 @@
+/******************************************************************************
+ * event_channel.h
+ * 
+ * Event channels between domains.
+ * 
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef __HYPERVISOR_IFS__EVENT_CHANNEL_H__
+#define __HYPERVISOR_IFS__EVENT_CHANNEL_H__
+
+/*
+ * EVTCHNOP_bind_interdomain: Open an event channel between <dom1> and <dom2>.
+ * NOTES:
+ *  1. <dom1> and/or <dom2> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may create an event channel.
+ *  3. <port1> and <port2> are only supplied if the op succeeds.
+ */
+#define EVTCHNOP_bind_interdomain 0
+typedef struct {
+    /* IN parameters. */
+    domid_t dom1, dom2;               /*  0,  4 */
+    /* OUT parameters. */
+    u32     port1, port2;             /*  8, 12 */
+} PACKED evtchn_bind_interdomain_t; /* 16 bytes */
+
+/*
+ * EVTCHNOP_bind_virq: Bind a local event channel to IRQ <irq>.
+ * NOTES:
+ *  1. A virtual IRQ may be bound to at most one event channel per domain.
+ */
+#define EVTCHNOP_bind_virq        1
+typedef struct {
+    /* IN parameters. */
+    u32 virq;                         /*  0 */
+    /* OUT parameters. */
+    u32 port;                         /*  4 */
+} PACKED evtchn_bind_virq_t; /* 8 bytes */
+
+/*
+ * EVTCHNOP_bind_pirq: Bind a local event channel to IRQ <irq>.
+ * NOTES:
+ *  1. A physical IRQ may be bound to at most one event channel per domain.
+ *  2. Only a sufficiently-privileged domain may bind to a physical IRQ.
+ */
+#define EVTCHNOP_bind_pirq        2
+typedef struct {
+    /* IN parameters. */
+    u32 pirq;                         /*  0 */
+#define BIND_PIRQ__WILL_SHARE 1
+    u32 flags; /* BIND_PIRQ__* */     /*  4 */
+    /* OUT parameters. */
+    u32 port;                         /*  8 */
+} PACKED evtchn_bind_pirq_t; /* 12 bytes */
+
+/*
+ * EVTCHNOP_close: Close the communication channel which has an endpoint at
+ * <dom, port>.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may close an event channel
+ *     for which <dom> is not DOMID_SELF.
+ */
+#define EVTCHNOP_close            3
+typedef struct {
+    /* IN parameters. */
+    domid_t dom;                      /*  0 */
+    u32     port;                     /*  4 */
+    /* No OUT parameters. */
+} PACKED evtchn_close_t; /* 8 bytes */
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <DOMID_SELF, local_port>.
+ */
+#define EVTCHNOP_send             4
+typedef struct {
+    /* IN parameters. */
+    u32     local_port;               /*  0 */
+    /* No OUT parameters. */
+} PACKED evtchn_send_t; /* 4 bytes */
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <dom, port>.
+ * NOTES:
+ *  1. <dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may obtain the status of an event
+ *     channel for which <dom> is not DOMID_SELF.
+ */
+#define EVTCHNOP_status           5
+typedef struct {
+    /* IN parameters */
+    domid_t dom;                      /*  0 */
+    u32     port;                     /*  4 */
+    /* OUT parameters */
+#define EVTCHNSTAT_closed       0  /* Channel is not in use.                 */
+#define EVTCHNSTAT_unbound      1  /* Channel is not bound to a source.      */
+#define EVTCHNSTAT_interdomain  2  /* Channel is connected to remote domain. */
+#define EVTCHNSTAT_pirq         3  /* Channel is bound to a phys IRQ line.   */
+#define EVTCHNSTAT_virq         4  /* Channel is bound to a virtual IRQ line. */
+    u32     status;                   /*  8 */
+    union {                           /* 12 */
+        struct {
+            domid_t dom;                              /* 12 */
+            u32     port;                             /* 16 */
+        } PACKED interdomain; /* EVTCHNSTAT_interdomain */
+        u32 pirq;      /* EVTCHNSTAT_pirq        */   /* 12 */
+        u32 virq;      /* EVTCHNSTAT_virq        */   /* 12 */
+    } PACKED u;
+} PACKED evtchn_status_t; /* 20 bytes */
+
+typedef struct {
+    u32 cmd; /* EVTCHNOP_* */         /*  0 */
+    u32 __reserved;                   /*  4 */
+    union {                           /*  8 */
+        evtchn_bind_interdomain_t bind_interdomain;
+        evtchn_bind_virq_t        bind_virq;
+        evtchn_bind_pirq_t        bind_pirq;
+        evtchn_close_t            close;
+        evtchn_send_t             send;
+        evtchn_status_t           status;
+        u8                        __dummy[24];
+    } PACKED u;
+} PACKED evtchn_op_t; /* 32 bytes */
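+
+/*
+ * Illustrative invocation (a sketch): bind an interdomain channel and
+ * recover the local port on success:
+ *
+ *  evtchn_op_t op;
+ *  op.cmd = EVTCHNOP_bind_interdomain;
+ *  op.u.bind_interdomain.dom1 = DOMID_SELF;
+ *  op.u.bind_interdomain.dom2 = remote_domid;
+ *  if ( HYPERVISOR_event_channel_op(&op) == 0 )
+ *      local_port = op.u.bind_interdomain.port1;
+ */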
+
+#endif /* __HYPERVISOR_IFS__EVENT_CHANNEL_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/hypervisor-if.h b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/hypervisor-if.h
new file mode 100644 (file)
index 0000000..b6231e5
--- /dev/null
@@ -0,0 +1,395 @@
+/******************************************************************************
+ * hypervisor-if.h
+ * 
+ * Guest OS interface to Xen.
+ */
+
+#ifndef __HYPERVISOR_IF_H__
+#define __HYPERVISOR_IF_H__
+
+/* GCC-specific way to pack structure definitions (no implicit padding). */
+#define PACKED __attribute__ ((packed))
+
+#if defined(__i386__)
+#include "arch-x86_32.h"
+#elif defined(__x86_64__)
+#include "arch-x86_64.h"
+#else
+#error "Unsupported architecture"
+#endif
+
+/*
+ * HYPERVISOR "SYSTEM CALLS"
+ */
+
+/* EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. */
+#define __HYPERVISOR_set_trap_table        0
+#define __HYPERVISOR_mmu_update            1
+#define __HYPERVISOR_set_gdt               2
+#define __HYPERVISOR_stack_switch          3
+#define __HYPERVISOR_set_callbacks         4
+#define __HYPERVISOR_fpu_taskswitch        5
+#define __HYPERVISOR_sched_op              6
+#define __HYPERVISOR_dom0_op               7
+#define __HYPERVISOR_set_debugreg          8
+#define __HYPERVISOR_get_debugreg          9
+#define __HYPERVISOR_update_descriptor    10
+#define __HYPERVISOR_set_fast_trap        11
+#define __HYPERVISOR_dom_mem_op           12
+#define __HYPERVISOR_multicall            13
+#define __HYPERVISOR_update_va_mapping    14
+#define __HYPERVISOR_set_timer_op         15
+#define __HYPERVISOR_event_channel_op     16
+#define __HYPERVISOR_xen_version          17
+#define __HYPERVISOR_console_io           18
+#define __HYPERVISOR_physdev_op           19
+#define __HYPERVISOR_update_va_mapping_otherdomain 20
+
+/*
+ * MULTICALLS
+ * 
+ * Multicalls are listed in an array, with each element being a fixed size 
+ * (BYTES_PER_MULTICALL_ENTRY). Each is of the form (op, arg1, ..., argN)
+ * where each element of the tuple is a machine word. 
+ */
+#define ARGS_PER_MULTICALL_ENTRY 8
+
+
+/* 
+ * VIRTUAL INTERRUPTS
+ * 
+ * Virtual interrupts that a guest OS may receive from the hypervisor.
+ */
+#define VIRQ_MISDIRECT  0  /* Catch-all interrupt for unbound VIRQs.      */
+#define VIRQ_TIMER      1  /* Timebase update, and/or requested timeout.  */
+#define VIRQ_DEBUG      2  /* Request guest to dump debug info.           */
+#define VIRQ_CONSOLE    3  /* (DOM0) bytes received on emergency console. */
+#define VIRQ_DOM_EXC    4  /* (DOM0) Exceptional event for some domain.   */
+#define NR_VIRQS        5
+
+/*
+ * MMU-UPDATE REQUESTS
+ * 
+ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
+ * ptr[1:0] specifies the appropriate MMU_* command.
+ * 
+ * GPS (General-Purpose Subject)
+ * -----------------------------
+ *  This is the domain that must own all non-page-table pages involved in
+ *  MMU updates. By default it is the domain that executes mmu_update(). If the
+ *  caller has sufficient privilege then it can be changed by executing
+ *  MMUEXT_SET_SUBJECTDOM.
+ * 
+ * PTS (Page-Table Subject)
+ * ------------------------
+ *  This domain must own all the page-table pages that are subject to MMU
+ *  updates. By default it is the domain that executes mmu_update(). If the
+ *  caller has sufficient privilege then it can be changed by executing
+ *  MMUEXT_SET_SUBJECTDOM with val[14] (SET_PAGETABLE_SUBJECTDOM) set.
+ * 
+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
+ * Updates an entry in a page table.
+ * ptr[:2]  -- machine address of the page-table entry to modify [1]
+ * val      -- value to write [2]
+ * 
+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
+ * Updates an entry in the machine->pseudo-physical mapping table.
+ * ptr[:2]  -- machine address within the frame whose mapping to modify [3]
+ * val      -- value to write into the mapping entry
+ *  
+ * ptr[1:0] == MMU_EXTENDED_COMMAND:
+ * val[7:0] -- MMUEXT_* command
+ * 
+ *   val[7:0] == MMUEXT_(UN)PIN_*_TABLE:
+ *   ptr[:2]  -- machine address of frame to be (un)pinned as a p.t. page [1]
+ * 
+ *   val[7:0] == MMUEXT_NEW_BASEPTR:
+ *   ptr[:2]  -- machine address of new page-table base to install in MMU [1]
+ * 
+ *   val[7:0] == MMUEXT_TLB_FLUSH:
+ *   no additional arguments
+ * 
+ *   val[7:0] == MMUEXT_INVLPG:
+ *   ptr[:2]  -- linear address to be flushed from the TLB
+ * 
+ *   val[7:0] == MMUEXT_SET_LDT:
+ *   ptr[:2]  -- linear address of LDT base (NB. must be page-aligned)
+ *   val[:8]  -- number of entries in LDT
+ * 
+ *   val[7:0] == MMUEXT_SET_SUBJECTDOM:
+ *   val[14]  -- if TRUE then sets the PTS in addition to the GPS.
+ *   (ptr[31:15],val[31:15]) -- dom[31:0]
+ * 
+ *   val[7:0] == MMUEXT_REASSIGN_PAGE:
+ *   ptr[:2]  -- machine address within page to be reassigned to the GPS.
+ * 
+ *   val[7:0] == MMUEXT_RESET_SUBJECTDOM:
+ *   Resets both the GPS and the PTS to their defaults (i.e., calling domain).
+ * 
+ * Notes on constraints on the above arguments:
+ *  [1] The page frame containing the machine address must belong to the PTS.
+ *  [2] If the PTE is valid (i.e., bit 0 is set) then the specified page frame
+ *      must belong to: 
+ *       (a) the PTS (if the PTE is part of a non-L1 table); or
+ *       (b) the GPS (if the PTE is part of an L1 table).
+ *  [3] The page frame containing the machine address must belong to the GPS.
+ */
+#define MMU_NORMAL_PT_UPDATE     0 /* checked '*ptr = val'. ptr is MA.       */
+#define MMU_MACHPHYS_UPDATE      2 /* ptr = MA of frame to modify entry for  */
+#define MMU_EXTENDED_COMMAND     3 /* least 8 bits of val demux further      */
+#define MMUEXT_PIN_L1_TABLE      0 /* ptr = MA of frame to pin               */
+#define MMUEXT_PIN_L2_TABLE      1 /* ptr = MA of frame to pin               */
+#define MMUEXT_PIN_L3_TABLE      2 /* ptr = MA of frame to pin               */
+#define MMUEXT_PIN_L4_TABLE      3 /* ptr = MA of frame to pin               */
+#define MMUEXT_UNPIN_TABLE       4 /* ptr = MA of frame to unpin             */
+#define MMUEXT_NEW_BASEPTR       5 /* ptr = MA of new pagetable base         */
+#define MMUEXT_TLB_FLUSH         6 /* ptr = NULL                             */
+#define MMUEXT_INVLPG            7 /* ptr = VA to invalidate                 */
+#define MMUEXT_SET_LDT           8 /* ptr = VA of table; val = # entries     */
+#define MMUEXT_SET_SUBJECTDOM    9 /* (ptr[31:15],val[31:15]) = dom[31:0]    */
+#define SET_PAGETABLE_SUBJECTDOM (1<<14) /* OR into 'val' arg of SUBJECTDOM  */
+#define MMUEXT_REASSIGN_PAGE    10
+#define MMUEXT_RESET_SUBJECTDOM 11
+#define MMUEXT_CMD_MASK        255
+#define MMUEXT_CMD_SHIFT         8
+
+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
+#define UVMF_FLUSH_TLB          1 /* Flush entire TLB. */
+#define UVMF_INVLPG             2 /* Flush the VA mapping being updated. */
+
+
+/*
+ * Commands to HYPERVISOR_sched_op().
+ */
+#define SCHEDOP_yield           0   /* Give up the CPU voluntarily.       */
+#define SCHEDOP_block           1   /* Block until an event is received.  */
+#define SCHEDOP_shutdown        2   /* Stop executing this domain.        */
+#define SCHEDOP_cmdmask       255   /* 8-bit command. */
+#define SCHEDOP_reasonshift     8   /* 8-bit reason code. (SCHEDOP_shutdown) */
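+
+/*
+ * E.g. (illustrative) a clean reboot is requested with:
+ *  HYPERVISOR_sched_op(SCHEDOP_shutdown |
+ *                      (SHUTDOWN_reboot << SCHEDOP_reasonshift));
+ * where SHUTDOWN_reboot is the reason code referred to by the
+ * control-interface shutdown messages.
+ */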
+
+/*
+ * Commands to HYPERVISOR_console_io().
+ */
+#define CONSOLEIO_write         0
+#define CONSOLEIO_read          1
+
+/*
+ * Commands to HYPERVISOR_dom_mem_op().
+ */
+#define MEMOP_increase_reservation 0
+#define MEMOP_decrease_reservation 1
+
+#ifndef __ASSEMBLY__
+
+typedef u32 domid_t;
+/* DOMID_SELF is used in certain contexts to refer to oneself. */
+#define DOMID_SELF (0x7FFFFFFEU)
+
+/*
+ * Send an array of these to HYPERVISOR_mmu_update().
+ * NB. The fields are natural pointer/address size for this architecture.
+ */
+typedef struct
+{
+    memory_t ptr;    /* Machine address of PTE. */
+    memory_t val;    /* New contents of PTE.    */
+} PACKED mmu_update_t;
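+
+/*
+ * For example (sketch), pinning a frame as an L2 page table is encoded as:
+ *  mmu_update_t req;
+ *  req.ptr = frame_machine_address | MMU_EXTENDED_COMMAND;
+ *  req.val = MMUEXT_PIN_L2_TABLE;
+ * with 'req' then passed in the request array of HYPERVISOR_mmu_update().
+ */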
+
+/*
+ * Send an array of these to HYPERVISOR_multicall().
+ * NB. The fields are natural register size for this architecture.
+ */
+typedef struct
+{
+    cpureg_t op;
+    cpureg_t args[7];
+} PACKED multicall_entry_t;
+
+/* Event channel endpoints per domain. */
+#define NR_EVENT_CHANNELS 1024
+
+/* No support for multi-processor guests. */
+#define MAX_VIRT_CPUS 1
+
+/*
+ * Xen/guestos shared data -- pointer provided in start_info.
+ * NB. We expect that this struct is smaller than a page.
+ */
+typedef struct shared_info_st
+{
+    /*
+     * Per-VCPU information goes here. This will be cleaned up more when Xen 
+     * actually supports multi-VCPU guests.
+     */
+    struct {
+        /*
+         * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+         * a pending notification for a particular VCPU. It is then cleared 
+         * by the guest OS /before/ checking for pending work, thus avoiding
+         * a set-and-check race. Note that the mask is only accessed by Xen
+         * on the CPU that is currently hosting the VCPU. This means that the
+         * pending and mask flags can be updated by the guest without special
+         * synchronisation (i.e., no need for the x86 LOCK prefix).
+         * This may seem suboptimal because if the pending flag is set by
+         * a different CPU then an IPI may be scheduled even when the mask
+         * is set. However, note:
+         *  1. The task of 'interrupt holdoff' is covered by the per-event-
+         *     channel mask bits. A 'noisy' event that is continually being
+         *     triggered can be masked at source at this very precise
+         *     granularity.
+         *  2. The main purpose of the per-VCPU mask is therefore to restrict
+         *     reentrant execution: whether for concurrency control, or to
+         *     prevent unbounded stack usage. Whatever the purpose, we expect
+         *     that the mask will be asserted only for short periods at a time,
+         *     and so the likelihood of a 'spurious' IPI is suitably small.
+         * The mask is read before making an event upcall to the guest: a
+         * non-zero mask therefore guarantees that the VCPU will not receive
+         * an upcall activation. The mask is cleared when the VCPU requests
+         * to block: this avoids wakeup-waiting races.
+         */
+        u8 evtchn_upcall_pending;
+        u8 evtchn_upcall_mask;
+        u8 pad0, pad1;
+    } PACKED vcpu_data[MAX_VIRT_CPUS];  /*   0 */
+
+    /*
+     * A domain can have up to 1024 "event channels" on which it can send
+     * and receive asynchronous event notifications. There are three classes
+     * of event that are delivered by this mechanism:
+     *  1. Bi-directional inter- and intra-domain connections. Domains must
+     *     arrange out-of-band to set up a connection (usually the setup
+     *     is initiated and organised by a privileged third party such as
+     *     software running in domain 0).
+     *  2. Physical interrupts. A domain with suitable hardware-access
+     *     privileges can bind an event-channel port to a physical interrupt
+     *     source.
+     *  3. Virtual interrupts ('events'). A domain can bind an event-channel
+     *     port to a virtual interrupt source, such as the virtual-timer
+     *     device or the emergency console.
+     * 
+     * Event channels are addressed by a "port index" between 0 and 1023.
+     * Each channel is associated with two bits of information:
+     *  1. PENDING -- notifies the domain that there is a pending notification
+     *     to be processed. This bit is cleared by the guest.
+     *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
+     *     will cause an asynchronous upcall to be scheduled. This bit is only
+     *     updated by the guest. It is read-only within Xen. If a channel
+     *     becomes pending while the channel is masked then the 'edge' is lost
+     *     (i.e., when the channel is unmasked, the guest must manually handle
+     *     pending notifications as no upcall will be scheduled by Xen).
+     * 
+     * To expedite scanning of pending notifications, any 0->1 pending
+     * transition on an unmasked channel causes a corresponding bit in a
+     * 32-bit selector to be set. Each bit in the selector covers a 32-bit
+     * word in the PENDING bitfield array.
+     */
+    u32 evtchn_pending[32];             /*   4 */
+    u32 evtchn_pending_sel;             /* 132 */
+    u32 evtchn_mask[32];                /* 136 */
+
+    /*
+     * Time: The following abstractions are exposed: System Time, Clock Time,
+     * Domain Virtual Time. Domains can access Cycle counter time directly.
+     */
+    u64                cpu_freq;        /* 264: CPU frequency (Hz).          */
+
+    /*
+     * The following values are updated periodically (and not necessarily
+     * atomically!). The guest OS detects this because 'time_version1' is
+     * incremented just before updating these values, and 'time_version2' is
+     * incremented immediately after. See the Xen-specific Linux code for an
+     * example of how to read these values safely (arch/xen/kernel/time.c).
+     */
+    u32                time_version1;   /* 272 */
+    u32                time_version2;   /* 276 */
+    tsc_timestamp_t    tsc_timestamp;   /* TSC at last update of time vals.  */
+    u64                system_time;     /* Time, in nanosecs, since boot.    */
+    u32                wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
+    u32                wc_usec;         /* Usecs 00:00:00 UTC, Jan 1, 1970.  */
+    u64                domain_time;     /* Domain virtual time, in nanosecs. */
+
+    /*
+     * Timeout values:
+     * Allow a domain to specify a timeout value in system time and 
+     * domain virtual time.
+     */
+    u64                wall_timeout;    /* 312 */
+    u64                domain_timeout;  /* 320 */
+
+    execution_context_t execution_context; /* 328 */
+
+} PACKED shared_info_t;
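+
+/*
+ * A consistent snapshot of the time fields above can be taken with a
+ * version-check loop of roughly this shape (a sketch; see
+ * arch/xen/kernel/time.c for a real implementation):
+ *
+ *  do {
+ *      version = s->time_version2;
+ *      rmb();
+ *      ...copy tsc_timestamp, system_time, wc_sec, wc_usec...
+ *      rmb();
+ *  } while ( version != s->time_version1 );
+ */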
+
+/*
+ * Start-of-day memory layout for the initial domain (DOM0):
+ *  1. The domain is started within contiguous virtual-memory region.
+ *  2. The contiguous region begins and ends on an aligned 4MB boundary.
+ *  3. The region start corresponds to the load address of the OS image.
+ *     If the load address is not 4MB aligned then the address is rounded down.
+ *  4. This is the order of bootstrap elements in the initial virtual region:
+ *      a. relocated kernel image
+ *      b. initial ram disk              [mod_start, mod_len]
+ *      c. list of allocated page frames [mfn_list, nr_pages]
+ *      d. bootstrap page tables         [pt_base, CR3 (x86)]
+ *      e. start_info_t structure        [register ESI (x86)]
+ *      f. bootstrap stack               [register ESP (x86)]
+ *  5. Bootstrap elements are packed together, but each is 4kB-aligned.
+ *  6. The initial ram disk may be omitted.
+ *  7. The list of page frames forms a contiguous 'pseudo-physical' memory
+ *     layout for the domain. In particular, the bootstrap virtual-memory
+ *     region is a 1:1 mapping to the first section of the pseudo-physical map.
+ *  8. All bootstrap elements are mapped read-writeable for the guest OS. The
+ *     only exception is the bootstrap page table, which is mapped read-only.
+ *  9. There is guaranteed to be at least 512kB padding after the final
+ *     bootstrap element. If necessary, the bootstrap virtual region is
+ *     extended by an extra 4MB to ensure this.
+ */
+
+/*
+ * This is the basic bootstrap information structure as passed by Xen to the
+ * initial controller domain. We want this structure to be easily extended by
+ * more sophisticated domain builders and controllers, so we make the basic
+ * fields of this structure available via a BASIC_START_INFO macro.
+ * 
+ * Extended version of start_info_t should be defined as:
+ *  typedef struct {
+ *      BASIC_START_INFO;
+ *      <...extra fields...>
+ *  } extended_start_info_t;
+ */
+#define MAX_CMDLINE 256
+#define BASIC_START_INFO                                                      \
+    /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.     */ \
+    memory_t nr_pages;       /*  0: Total pages allocated to this domain. */  \
+    _MEMORY_PADDING(A);                                                       \
+    memory_t shared_info;    /*  8: MACHINE address of shared info struct.*/  \
+    _MEMORY_PADDING(B);                                                       \
+    u32      flags;          /* 16: SIF_xxx flags.                        */  \
+    u32      __pad;                                                           \
+    /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).      */ \
+    memory_t pt_base;        /* 24: VIRTUAL address of page directory.    */  \
+    _MEMORY_PADDING(C);                                                       \
+    memory_t nr_pt_frames;   /* 32: Number of bootstrap p.t. frames.      */  \
+    _MEMORY_PADDING(D);                                                       \
+    memory_t mfn_list;       /* 40: VIRTUAL address of page-frame list.   */  \
+    _MEMORY_PADDING(E);                                                       \
+    memory_t mod_start;      /* 48: VIRTUAL address of pre-loaded module. */  \
+    _MEMORY_PADDING(F);                                                       \
+    memory_t mod_len;        /* 56: Size (bytes) of pre-loaded module.    */  \
+    _MEMORY_PADDING(G);                                                       \
+    u8 cmd_line[MAX_CMDLINE] /* 64 */
+
+typedef struct {
+    BASIC_START_INFO;
+} PACKED start_info_t; /* 320 bytes */
+
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
+#define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
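+
+/*
+ * Illustrative sketch, not part of the interface: early boot code might test
+ * these flags as follows, where 'si' is the guest's start_info_t pointer
+ * (passed in register ESI on x86, per the layout notes above). The helper
+ * names are hypothetical.
+ */
+static inline int start_info_privileged(start_info_t *si)
+{
+    return (si->flags & SIF_PRIVILEGED) != 0;
+}
+
+static inline int start_info_initdomain(start_info_t *si)
+{
+    return (si->flags & SIF_INITDOMAIN) != 0;
+}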
+
+/* For use in guest OSes. */
+extern shared_info_t *HYPERVISOR_shared_info;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __HYPERVISOR_IF_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/physdev.h b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/physdev.h
new file mode 100644 (file)
index 0000000..4e1aa13
--- /dev/null
@@ -0,0 +1,80 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
+ ****************************************************************************
+ * (c) 2004 - Rolf Neugebauer - Intel Research Cambridge
+ * (c) 2004 - Keir Fraser - University of Cambridge
+ ****************************************************************************
+ * Description: Interface for domains to access physical devices on the PCI bus
+ */
+
+#ifndef __HYPERVISOR_IFS_PHYSDEV_H__
+#define __HYPERVISOR_IFS_PHYSDEV_H__
+
+/* Commands to HYPERVISOR_physdev_op() */
+#define PHYSDEVOP_PCI_CFGREG_READ       0
+#define PHYSDEVOP_PCI_CFGREG_WRITE      1
+#define PHYSDEVOP_PCI_INITIALISE_DEVICE 2
+#define PHYSDEVOP_PCI_PROBE_ROOT_BUSES  3
+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY     4
+#define PHYSDEVOP_IRQ_STATUS_QUERY      5
+
+/* Read from PCI configuration space. */
+typedef struct {
+    /* IN */
+    u32 bus;                          /*  0 */
+    u32 dev;                          /*  4 */
+    u32 func;                         /*  8 */
+    u32 reg;                          /* 12 */
+    u32 len;                          /* 16 */
+    /* OUT */
+    u32 value;                        /* 20 */
+} PACKED physdevop_pci_cfgreg_read_t; /* 24 bytes */
+
+/* Write to PCI configuration space. */
+typedef struct {
+    /* IN */
+    u32 bus;                          /*  0 */
+    u32 dev;                          /*  4 */
+    u32 func;                         /*  8 */
+    u32 reg;                          /* 12 */
+    u32 len;                          /* 16 */
+    u32 value;                        /* 20 */
+} PACKED physdevop_pci_cfgreg_write_t; /* 24 bytes */
+
+/* Do final initialisation of a PCI device (e.g., last-moment IRQ routing). */
+typedef struct {
+    /* IN */
+    u32 bus;                          /*  0 */
+    u32 dev;                          /*  4 */
+    u32 func;                         /*  8 */
+} PACKED physdevop_pci_initialise_device_t; /* 12 bytes */
+
+/* Find the root buses for subsequent scanning. */
+typedef struct {
+    /* OUT */
+    u32 busmask[256/32];              /*  0 */
+} PACKED physdevop_pci_probe_root_buses_t; /* 32 bytes */
+
+typedef struct {
+    /* IN */
+    u32 irq;                          /*  0 */
+    /* OUT */
+/* Need to call PHYSDEVOP_IRQ_UNMASK_NOTIFY when the IRQ has been serviced? */
+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY (1<<0)
+    u32 flags;                        /*  4 */
+} PACKED physdevop_irq_status_query_t; /* 8 bytes */
+
+typedef struct _physdev_op_st 
+{
+    u32 cmd;                          /*  0 */
+    u32 __pad;                        /*  4 */
+    union {                           /*  8 */
+        physdevop_pci_cfgreg_read_t       pci_cfgreg_read;
+        physdevop_pci_cfgreg_write_t      pci_cfgreg_write;
+        physdevop_pci_initialise_device_t pci_initialise_device;
+        physdevop_pci_probe_root_buses_t  pci_probe_root_buses;
+        physdevop_irq_status_query_t      irq_status_query;
+        u8                                __dummy[32];
+    } PACKED u;
+} PACKED physdev_op_t; /* 40 bytes */
+
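+/*
+ * Illustrative sketch, not part of this interface: reading a 32-bit value
+ * from PCI configuration space. This assumes the HYPERVISOR_physdev_op()
+ * hypercall wrapper named above is declared (e.g. via asm/hypervisor.h);
+ * the helper name is hypothetical.
+ */
+static inline int physdev_pci_read32(
+    u32 bus, u32 dev, u32 func, u32 reg, u32 *value)
+{
+    physdev_op_t op;
+    int rc;
+    op.cmd                    = PHYSDEVOP_PCI_CFGREG_READ;
+    op.u.pci_cfgreg_read.bus  = bus;
+    op.u.pci_cfgreg_read.dev  = dev;
+    op.u.pci_cfgreg_read.func = func;
+    op.u.pci_cfgreg_read.reg  = reg;
+    op.u.pci_cfgreg_read.len  = 4;    /* 32-bit access */
+    rc = HYPERVISOR_physdev_op(&op);  /* assumed wrapper */
+    if ( rc == 0 )
+        *value = op.u.pci_cfgreg_read.value;
+    return rc;
+}
+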
+#endif /* __HYPERVISOR_IFS_PHYSDEV_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/sched_ctl.h b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/sched_ctl.h
new file mode 100644 (file)
index 0000000..17daa7a
--- /dev/null
@@ -0,0 +1,83 @@
+/**
+ * Generic scheduler control interface.
+ *
+ * Mark Williamson, (C) 2004 Intel Research Cambridge
+ */
+
+#ifndef __SCHED_CTL_H__
+#define __SCHED_CTL_H__
+
+/* Scheduler types */
+#define SCHED_BVT      0
+#define SCHED_FBVT     1
+#define SCHED_ATROPOS  2
+#define SCHED_RROBIN   3
+
+/* These describe the intended direction of data transfer for a scheduler
+ * control or domain command. */
+#define SCHED_INFO_PUT 0
+#define SCHED_INFO_GET 1
+
+/*
+ * Generic scheduler control command - used to adjust system-wide scheduler
+ * parameters
+ */
+struct sched_ctl_cmd
+{
+    u32 sched_id;                     /*  0 */
+    u32 direction;                    /*  4 */
+    union {                           /*  8 */
+        struct bvt_ctl
+        {
+            /* IN variables. */
+            u32 ctx_allow;            /*  8: context switch allowance */
+        } PACKED bvt;
+
+        struct fbvt_ctl
+        {
+            /* IN variables. */
+            u32 ctx_allow;            /*  8: context switch allowance */
+        } PACKED fbvt;
+
+        struct rrobin_ctl
+        {
+            /* IN variables */
+            u64 slice;                /*  8: round robin time slice */
+        } PACKED rrobin;
+    } PACKED u;
+} PACKED; /* 16 bytes */
+
+struct sched_adjdom_cmd
+{
+    u32     sched_id;                 /*  0 */
+    u32     direction;                /*  4 */
+    domid_t domain;                   /*  8 */
+    u32     __pad;
+    union {                           /* 16 */
+        struct bvt_adjdom
+        {
+            u32 mcu_adv;    /* 16: mcu advance: inverse of weight */
+            u32 warp;       /* 20: time warp */
+            u32 warpl;      /* 24: warp limit */
+            u32 warpu;      /* 28: unwarp time requirement */
+        } PACKED bvt;
+
+        struct fbvt_adjdom
+        {
+            u32 mcu_adv;    /* 16: mcu advance: inverse of weight */
+            u32 warp;       /* 20: time warp */
+            u32 warpl;      /* 24: warp limit */
+            u32 warpu;      /* 28: unwarp time requirement */
+        } PACKED fbvt;
+
+        struct atropos_adjdom
+        {
+            u64 nat_period; /* 16 */
+            u64 nat_slice;  /* 24 */
+            u64 latency;    /* 32 */
+            u32 xtratime;   /* 40 */
+        } PACKED atropos;
+    } PACKED u;
+} PACKED; /* 44 bytes */
+
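+/*
+ * Illustrative sketch, not part of this interface: marshalling a
+ * sched_adjdom_cmd that sets BVT parameters for one domain. How the command
+ * is then handed to Xen (via a privileged control operation) is outside this
+ * header, so only the fill-in is shown; the helper name is hypothetical.
+ */
+static inline void sched_adjdom_bvt_fill(
+    struct sched_adjdom_cmd *cmd, domid_t dom,
+    u32 mcu_adv, u32 warp, u32 warpl, u32 warpu)
+{
+    cmd->sched_id      = SCHED_BVT;
+    cmd->direction     = SCHED_INFO_PUT;
+    cmd->domain        = dom;
+    cmd->u.bvt.mcu_adv = mcu_adv;   /* inverse of weight */
+    cmd->u.bvt.warp    = warp;
+    cmd->u.bvt.warpl   = warpl;
+    cmd->u.bvt.warpu   = warpu;
+}
+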
+#endif /* __SCHED_CTL_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/trace.h b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor-ifs/trace.h
new file mode 100644 (file)
index 0000000..b5458b9
--- /dev/null
@@ -0,0 +1,31 @@
+/******************************************************************************
+ * include/hypervisor-ifs/trace.h
+ */
+
+#ifndef __HYPERVISOR_IFS_TRACE_H__
+#define __HYPERVISOR_IFS_TRACE_H__
+
+/* This structure represents a single trace buffer record. */
+struct t_rec {
+    u64 cycles;               /* 64 bit cycle counter timestamp */
+    u32 event;                /* 32 bit event ID                */
+    u32 d1, d2, d3, d4, d5;   /* event data items               */
+};
+
+/*
+ * This structure contains the metadata for a single trace buffer.  The 'head'
+ * field indexes into an array of struct t_rec's.
+ */
+struct t_buf {
+    struct t_rec *data;     /* pointer to data area; a physical address,
+                             * for convenience in user-space code            */
+
+    unsigned long size;      /* size of the data area, in t_recs             */
+    unsigned long head;      /* array index of the most recent record        */
+
+    /* Kernel-private elements follow... */
+    struct t_rec *head_ptr; /* pointer to the head record                    */
+    struct t_rec *vdata;    /* virtual address pointer to data               */
+};
+
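+/*
+ * Illustrative sketch, not part of this interface: once a consumer has
+ * mapped the buffer and converted 'data' (a physical address) into a
+ * pointer valid in its own address space, the most recent record is
+ * located by the 'head' index. The helper name is hypothetical.
+ */
+static inline struct t_rec *t_buf_latest(struct t_rec *mapped_data,
+                                         struct t_buf *buf)
+{
+    /* 'head' is already an array index, not a free-running counter. */
+    return mapped_data + buf->head;
+}
+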
+#endif /* __HYPERVISOR_IFS_TRACE_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/multicall.h b/linux-2.6.7-xen-sparse/include/asm-xen/multicall.h
new file mode 100644 (file)
index 0000000..f0ea5c3
--- /dev/null
@@ -0,0 +1,84 @@
+/******************************************************************************
+ * multicall.h
+ */
+
+#ifndef __MULTICALL_H__
+#define __MULTICALL_H__
+
+#include <asm/hypervisor.h>
+
+extern multicall_entry_t multicall_list[];
+extern int nr_multicall_ents;
+
+static inline void queue_multicall0(unsigned long op)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall1(unsigned long op, unsigned long arg1)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall2(
+    unsigned long op, unsigned long arg1, unsigned long arg2)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    multicall_list[i].args[1] = arg2;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall3(
+    unsigned long op, unsigned long arg1, unsigned long arg2,
+    unsigned long arg3)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    multicall_list[i].args[1] = arg2;
+    multicall_list[i].args[2] = arg3;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall4(
+    unsigned long op, unsigned long arg1, unsigned long arg2,
+    unsigned long arg3, unsigned long arg4)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    multicall_list[i].args[1] = arg2;
+    multicall_list[i].args[2] = arg3;
+    multicall_list[i].args[3] = arg4;
+    nr_multicall_ents = i+1;
+}
+
+static inline void queue_multicall5(
+    unsigned long op, unsigned long arg1, unsigned long arg2,
+    unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+    int i = nr_multicall_ents;
+    multicall_list[i].op      = op;
+    multicall_list[i].args[0] = arg1;
+    multicall_list[i].args[1] = arg2;
+    multicall_list[i].args[2] = arg3;
+    multicall_list[i].args[3] = arg4;
+    multicall_list[i].args[4] = arg5;
+    nr_multicall_ents = i+1;
+}
+
+static inline void execute_multicall_list(void)
+{
+    if ( unlikely(nr_multicall_ents == 0) ) return;
+    (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
+    nr_multicall_ents = 0;
+}
+
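+/*
+ * Illustrative usage, an assumption rather than part of this interface:
+ * batch two operations and issue them with a single hypercall. The
+ * __HYPERVISOR_* op numbers are assumed to be those defined in
+ * hypervisor-if.h; the function name is hypothetical.
+ */
+static inline void example_batched_switch(unsigned long ss, unsigned long esp)
+{
+    queue_multicall0(__HYPERVISOR_fpu_taskswitch);
+    queue_multicall2(__HYPERVISOR_stack_switch, ss, esp);
+    execute_multicall_list();  /* one hypercall covers both queued ops */
+}
+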
+#endif /* __MULTICALL_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/netif.h b/linux-2.6.7-xen-sparse/include/asm-xen/netif.h
new file mode 100644 (file)
index 0000000..098b292
--- /dev/null
@@ -0,0 +1,88 @@
+/******************************************************************************
+ * netif.h
+ * 
+ * Unified network-device I/O interface for Xen guest OSes.
+ * 
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __SHARED_NETIF_H__
+#define __SHARED_NETIF_H__
+
+typedef struct {
+    memory_t addr;   /*  0: Machine address of packet.  */
+    MEMORY_PADDING;
+    u16      id;     /*  8: Echoed in response message. */
+    u16      size;   /* 10: Packet size in bytes.       */
+} PACKED netif_tx_request_t; /* 12 bytes */
+
+typedef struct {
+    u16      id;     /*  0 */
+    s8       status; /*  2 */
+    u8       __pad;  /*  3 */
+} PACKED netif_tx_response_t; /* 4 bytes */
+
+typedef struct {
+    u16       id;    /*  0: Echoed in response message.        */
+} PACKED netif_rx_request_t; /* 2 bytes */
+
+typedef struct {
+    memory_t addr;   /*  0: Machine address of packet.              */
+    MEMORY_PADDING;
+    u16      id;     /*  8: Echoed copy of the request's id.        */
+    s16      status; /* 10: -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
+} PACKED netif_rx_response_t; /* 12 bytes */
+
+/*
+ * We use a special capitalised type name because it is _essential_ that all 
+ * arithmetic on indexes is done on an integer type of the correct size.
+ */
+typedef u32 NETIF_RING_IDX;
+
+/*
+ * Ring indexes are 'free running'. That is, they are not stored modulo the
+ * size of the ring buffer. The following macros convert a free-running counter
+ * into a value that can directly index a ring-buffer array.
+ */
+#define MASK_NETIF_RX_IDX(_i) ((_i)&(NETIF_RX_RING_SIZE-1))
+#define MASK_NETIF_TX_IDX(_i) ((_i)&(NETIF_TX_RING_SIZE-1))
+
+#define NETIF_TX_RING_SIZE 256
+#define NETIF_RX_RING_SIZE 256
+
+/* This structure must fit in a memory page. */
+typedef struct {
+    /*
+     * Frontend places packets into ring at req_prod.
+     * Frontend receives event when resp_prod passes event.
+     */
+    NETIF_RING_IDX req_prod;       /*  0 */
+    NETIF_RING_IDX resp_prod;      /*  4 */
+    NETIF_RING_IDX event;          /*  8 */
+    union {                        /* 12 */
+        netif_tx_request_t  req;
+        netif_tx_response_t resp;
+    } PACKED ring[NETIF_TX_RING_SIZE];
+} PACKED netif_tx_interface_t;
+
+/* This structure must fit in a memory page. */
+typedef struct {
+    /*
+     * Frontend places empty buffers into ring at req_prod.
+     * Frontend receives event when resp_prod passes event.
+     */
+    NETIF_RING_IDX req_prod;       /*  0 */
+    NETIF_RING_IDX resp_prod;      /*  4 */
+    NETIF_RING_IDX event;          /*  8 */
+    union {                        /* 12 */
+        netif_rx_request_t  req;
+        netif_rx_response_t resp;
+    } PACKED ring[NETIF_RX_RING_SIZE];
+} PACKED netif_rx_interface_t;
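+
+/*
+ * Illustrative sketch, not part of this interface: a frontend queueing a
+ * transmit request. The producer index is free running, as described above;
+ * only the masked value may index the ring array. The helper name is
+ * hypothetical, and any needed memory barriers are omitted for brevity.
+ */
+static inline void netif_tx_queue(
+    netif_tx_interface_t *tx, memory_t addr, u16 id, u16 size)
+{
+    NETIF_RING_IDX i = tx->req_prod;   /* free-running counter */
+    netif_tx_request_t *req = &tx->ring[MASK_NETIF_TX_IDX(i)].req;
+    req->addr = addr;
+    req->id   = id;
+    req->size = size;
+    tx->req_prod = i + 1;              /* publish to the backend */
+}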
+
+/* Descriptor status values */
+#define NETIF_RSP_DROPPED         -2
+#define NETIF_RSP_ERROR           -1
+#define NETIF_RSP_OKAY             0
+
+#endif /* __SHARED_NETIF_H__ */
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/xen.h b/linux-2.6.7-xen-sparse/include/asm-xen/xen.h
new file mode 100644 (file)
index 0000000..9486b90
--- /dev/null
@@ -0,0 +1,3 @@
+
+/* arch/xen/kernel/process.c */
+void xen_cpu_idle(void);